improve infrastructure and cleanups
tknopp committed Oct 8, 2023
1 parent 689df2a commit bad947b
Showing 17 changed files with 484 additions and 372 deletions.
5 changes: 2 additions & 3 deletions NeuralMNP/src/NeuralMNP.jl
@@ -17,10 +17,9 @@ using Plots

using ImageFiltering

-struct NeuralNetworkMNPAlg <: MNPAlgorithm end
-const NeuralNetworkMNP = NeuralNetworkMNPAlg()
+struct NeuralOperatorModel <: AbstractMNPModel end

-export NeuralNetworkMNP
+export NeuralOperatorModel

@enum FieldType begin
RANDOM_FIELD
10 changes: 5 additions & 5 deletions NeuralMNP/src/neuralOperator.jl
@@ -16,8 +16,8 @@ function make_neural_operator_model(inputChan, outputChan, modes, width, transfo
OperatorKernel(width=>width, (modes, ), transform, gelu, permuted=permuted),
OperatorKernel(width=>width, (modes, ), transform, permuted=permuted),
# project back to the scalar field of interest space
-permuted ? Conv((1,), width=>128, gelu) : Dense(width, 128, gelu),
-permuted ? Conv((1,), 128=>outputChan) : Dense(128, outputChan),
+permuted ? Conv((1,), width=>width, gelu) : Dense(width, width, gelu),
+permuted ? Conv((1,), width=>outputChan) : Dense(width, outputChan),
)
end

@@ -33,8 +33,8 @@ function make_unet_neural_operator_model(inputChan, outputChan, modes, width, tr
OperatorUNOKernel(width=>width, (modes, ), transform, gelu, permuted=permuted),
OperatorUNOKernel(width=>width, (modes, ), transform, permuted=permuted),
# project back to the scalar field of interest space
-permuted ? Conv((1,), width=>128, gelu) : Dense(width, 128, gelu),
-permuted ? Conv((1,), 128=>outputChan) : Dense(128, outputChan),
+permuted ? Conv((1,), width=>width, gelu) : Dense(width, width, gelu),
+permuted ? Conv((1,), width=>outputChan) : Dense(width, outputChan),
)
end

@@ -378,7 +378,7 @@ function applyToArbitrarySignal(neuralNetwork::NeuralNetwork, X, snippetLength;
end


-function MNPDynamics.simulationMNP(B::g, t, ::NeuralNetworkMNPAlg;
+function MNPDynamics.simulationMNP(B::g, t, ::NeuralOperatorModel;
neuralNetwork::NeuralNetwork,
kargs...
) where g
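For orientation, below is a minimal sketch in plain Flux of the projection head after this change, using hypothetical values width = 64 and outputChan = 3 and the permuted branch (these values are not taken from the repository): the hard-coded 128 hidden channels are replaced by width, so the head now scales with the chosen operator width.

using Flux   # sketch only; the real model is assembled inside make_neural_operator_model

width, outputChan = 64, 3                 # hypothetical values for illustration
head = Chain(
    Conv((1,), width => width, gelu),     # was Conv((1,), width => 128, gelu)
    Conv((1,), width => outputChan),      # was Conv((1,), 128 => outputChan)
)

x = rand(Float32, 1024, width, 8)         # (signal length, channels, batch) for a 1-D Conv
@assert size(head(x)) == (1024, outputChan, 8)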
3 changes: 2 additions & 1 deletion Project.toml
@@ -14,16 +14,17 @@ ProgressMeter = "92933f4c-e287-5a05-a399-4b506db050ca"
SharedArrays = "1a1011a3-84de-559e-8e89-a11a2f7dc383"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
SparseDiffTools = "47a9eef4-7e08-11e9-0b38-333d64bd3804"
-NeuralOperators = "ea5c82af-86e5-48da-8ee1-382d6ad7af4b"
StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Interpolations = "a98d9a8b-a2ab-59e6-89dd-64a1c18fca59"
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
+FLoops = "cc61a311-1640-44b5-9fba-1b764f453329"

[compat]
LinearSolve = "1.23"
julia = "^1"
FLoops = "0.2"

[extras]
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
6 changes: 3 additions & 3 deletions examples/iwmpi/evalMPSData.jl
@@ -39,7 +39,7 @@ function plotExampleSignals(model, kAnis=1100, offset=[0,0,0])
@time y = simulationMNP(B, t; p...)
pNO = copy(p)
pNO[:neuralNetwork] = model
-pNO[:alg] = NeuralNetworkMNP
+pNO[:model] = NeuralOperatorModel()
yNO = simulationMNP(B, t; pNO...)

plot!(pl1, t[:], y[:,1], lw=2, c=d, label="D=$(DCore[d]) nm true", legend = :outertopright)
@@ -59,7 +59,7 @@ function plotExampleSignals(model, kAnis=1100, offset=[0,0,0])
@time y = simulationMNP(B, t; p...)
pNO = copy(p)
pNO[:neuralNetwork] = model
-pNO[:alg] = NeuralNetworkMNP
+pNO[:model] = NeuralOperatorModel()
yNO = simulationMNP(B, t; pNO...)

plot!(pl2, t[:], y[:,1], lw=2, c=k, label="kAnis=$(kAnis[k]) true", legend = :outertopright)
@@ -80,7 +80,7 @@ function plotExampleSignals(model, kAnis=1100, offset=[0,0,0])
@time y = simulationMNP(B, t; p...)
pNO = copy(p)
pNO[:neuralNetwork] = model
-pNO[:alg] = NeuralNetworkMNP
+pNO[:model] = NeuralOperatorModel()
yNO = simulationMNP(B, t; pNO...)

plot!(pl3, t[:], y[:,1], lw=2, c=k, label="off=$(off[k]) mT true", legend = :outertopright)
4 changes: 2 additions & 2 deletions examples/iwmpi/evaluationSM2D.jl
@@ -25,7 +25,7 @@ function calcSMs(p; device=cpu)
@time sm[:Immobilized45FNO] = calcSM(p; device)

delete!(p, :neuralNetwork)
-delete!(p, :alg)
+delete!(p, :model)

p[:anisotropyAxis] = nothing
@time sm[:FluidFokkerPlanck] = calcSM(p; device)
@@ -46,7 +46,7 @@ pSM[:DCore] = 20e-9 # particle diameter in nm
pSM[:kAnis] = 1250 # anisotropy constant
pSM[:derivative] = false
pSM[:neuralNetwork] = NOModel
-pSM[:alg] = NeuralNetworkMNP
+pSM[:model] = NeuralOperatorModel()
N = 30
pSM[:nOffsets] = (N, N, 1)
pSM[:maxField] = 0.012
2 changes: 1 addition & 1 deletion examples/iwmpi/evaluationSM3D.jl
@@ -15,7 +15,7 @@ pSM[:DCore] = 20e-9 # particle diameter in nm
pSM[:kAnis] = 1250 # anisotropy constant
pSM[:derivative] = false
pSM[:neuralNetwork] = NOModel
-pSM[:alg] = NeuralNetworkMNP
+pSM[:model] = NeuralOperatorModel()
N = 10
pSM[:nOffsets] = (N, N, N)
pSM[:maxField] = 0.012
10 changes: 5 additions & 5 deletions examples/paper/evalMPSData.jl
@@ -62,7 +62,7 @@ function plotExampleSignals(model)
pNO = copy(p)

pNO[:neuralNetwork] = model
-pNO[:alg] = NeuralNetworkMNP
+pNO[:model] = NeuralOperatorModel()
yNO = simulationMNP(B, t; pNO...)

CairoMakie.lines!(ax1, t[:], y[:,1]/1e6,
@@ -87,7 +87,7 @@ function plotExampleSignals(model)
@time y = simulationMNP(B, t; p...)
pNO = copy(p)
pNO[:neuralNetwork] = model
-pNO[:alg] = NeuralNetworkMNP
+pNO[:model] = NeuralOperatorModel()
yNO = simulationMNP(B, t; pNO...)

CairoMakie.lines!(ax2, t[:], y[:,1]/1e6,
@@ -111,7 +111,7 @@ function plotExampleSignals(model)
@time y = simulationMNP(B, t; p...)
pNO = copy(p)
pNO[:neuralNetwork] = model
-pNO[:alg] = NeuralNetworkMNP
+pNO[:model] = NeuralOperatorModel()
yNO = simulationMNP(B, t; pNO...)

CairoMakie.lines!(ax3, t[:], y[:,1]/1e6,
@@ -135,7 +135,7 @@ function plotExampleSignals(model)
@time y = simulationMNP(B, t; p...)
pNO = copy(p)
pNO[:neuralNetwork] = model
-pNO[:alg] = NeuralNetworkMNP
+pNO[:model] = NeuralOperatorModel()
yNO = simulationMNP(B, t; pNO...)

CairoMakie.lines!(ax4, t[:], y[:,1]/1e6,
@@ -158,7 +158,7 @@ function plotExampleSignals(model)
@time y = simulationMNP(B, t; p...)
pNO = copy(p)
pNO[:neuralNetwork] = model
-pNO[:alg] = NeuralNetworkMNP
+pNO[:model] = NeuralOperatorModel()
yNO = simulationMNP(B, t; pNO...)

CairoMakie.lines!(ax5, t[:], y[:,1]/1e6,
4 changes: 2 additions & 2 deletions examples/paper/evaluationSM2D.jl
@@ -23,7 +23,7 @@ function calcSMs(p; device=gpu)
@time sm[:Immobilized45FNO] = calcSM(p; device)

delete!(p, :neuralNetwork)
-delete!(p, :alg)
+delete!(p, :model)

p[:anisotropyAxis] = nothing
@time sm[:FluidFokkerPlanck] = calcSM(p; device)
@@ -44,7 +44,7 @@ pSM[:DCore] = 20e-9 # particle diameter in nm
pSM[:kAnis] = 1250 # anisotropy constant
pSM[:derivative] = false
pSM[:neuralNetwork] = NOModel
-pSM[:alg] = NeuralNetworkMNP
+pSM[:model] = NeuralOperatorModel()
N = 30
pSM[:nOffsets] = (N, N, 1)
pSM[:maxField] = 0.012
2 changes: 1 addition & 1 deletion examples/paper/evaluationSM3D.jl
@@ -15,7 +15,7 @@ pSM[:DCore] = 20e-9 # particle diameter in nm
pSM[:kAnis] = 1250 # anisotropy constant
pSM[:derivative] = false
pSM[:neuralNetwork] = NOModel
-pSM[:alg] = NeuralNetworkMNP
+pSM[:model] = NeuralOperatorModel()
N = 10
pSM[:nOffsets] = (N, N, N)
pSM[:maxField] = 0.012
18 changes: 11 additions & 7 deletions src/MNPDynamics.jl
@@ -18,18 +18,17 @@ using ProgressMeter

using HDF5

+using FLoops

## sorting algorithms ##

-abstract type MNPAlgorithm end
+abstract type AbstractMNPModel end

-struct FokkerPlanckAlg <: MNPAlgorithm end
-struct LangevinFunctionAlg <: MNPAlgorithm end
+struct FokkerPlanckModel <: AbstractMNPModel end
+struct EquilibriumModel <: AbstractMNPModel end
+struct EquilibriumAnisModel <: AbstractMNPModel end

-const FokkerPlanck = FokkerPlanckAlg()
-const LangevinFunction = LangevinFunctionAlg()

-export MNPAlgorithm, FokkerPlanck, LangevinFunction
+export AbstractMNPModel, FokkerPlanckModel, EquilibriumModel, EquilibriumAnisModel

@enum RelaxationType begin
NEEL
@@ -39,8 +38,13 @@ end

export NEEL, BROWN, NO_RELAXATION

+export EnsembleThreads, EnsembleDistributed, EnsembleSerial

include("utils.jl")
include("sparseMatrixSetup.jl")
include("fokkerPlanck.jl")
include("equilibrium.jl")
include("equilibriumAnis.jl")
include("simulation.jl")
include("multiParams.jl")

26 changes: 26 additions & 0 deletions src/equilibrium.jl
@@ -0,0 +1,26 @@

function simulationMNP(B::g, tVec, ::EquilibriumModel;
MS = 474000.0,
DCore = 20e-9,
temp = 293.0,
derivative = false,
kargs...
) where g


y = zeros(Float64, length(tVec), 3)

dt = step(tVec)/10

if !derivative
for ti=1:length(tVec)
y[ti, :] = langevin(B(tVec[ti]); DCore, temp, MS)
end
else
for ti=1:length(tVec)
y[ti, :] = (langevin(B(tVec[ti]+dt); DCore, temp, MS)-langevin(B(tVec[ti]); DCore, temp, MS)) / dt
end
end

return y
end
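The EquilibriumModel path above delegates the actual field-to-magnetization mapping to a langevin helper that is not part of this diff. As a hedged sketch of what such a helper computes — the Langevin function scaled by the saturation magnetization and oriented along the applied field — it could look like the following; the package's own implementation may differ in scaling and edge-case handling.

using LinearAlgebra: norm

# Hedged sketch of a Langevin-equilibrium magnetization helper; not the package's code.
function langevin_sketch(B::AbstractVector; DCore = 20e-9, temp = 293.0, MS = 474000.0)
    kB = 1.38064852e-23             # Boltzmann constant in J/K
    μ0 = 4π * 1e-7                  # vacuum permeability in T*m/A
    m  = MS * π/6 * DCore^3         # magnetic moment of one particle core in A*m^2
    b  = norm(B)
    b == 0 && return zeros(3)       # equilibrium magnetization vanishes at zero field
    ξ  = μ0 * m * b / (kB * temp)   # ratio of magnetic to thermal energy
    return MS * (coth(ξ) - 1/ξ) * B / b
end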
27 changes: 27 additions & 0 deletions src/equilibriumAnis.jl
@@ -0,0 +1,27 @@

function simulationMNP(B::g, tVec, ::EquilibriumAnisModel;
MS = 474000.0,
DCore = 20e-9,
temp = 293.0,
kAnis = 625,
derivative = false,
kargs...
) where g


y = zeros(Float64, length(tVec), 3)

dt = step(tVec)/10

if !derivative
for ti=1:length(tVec)
y[ti, :] = langevin(B(tVec[ti]); DCore, temp, MS)
end
else
for ti=1:length(tVec)
y[ti, :] = (langevin(B(tVec[ti]+dt); DCore, temp, MS)-langevin(B(tVec[ti]); DCore, temp, MS)) / dt
end
end

return y
end
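Together with the example scripts above, which now set p[:model] = NeuralOperatorModel() where they previously set p[:alg] = NeuralNetworkMNP, a call against the new equilibrium models presumably looks like the sketch below; the keyword handling that forwards :model to these methods lives in src/simulation.jl, which this diff does not show, so treat the exact call pattern as an assumption.

using MNPDynamics

# Hypothetical excitation: 12 mT sinusoidal drive field along x at 25 kHz.
fB = 25000                                    # drive-field frequency in Hz
B  = time -> [0.012 * sin(2π * fB * time), 0.0, 0.0]
t  = range(0, stop = 1/fB, length = 1000)     # one field period

p = Dict{Symbol,Any}(
    :DCore      => 20e-9,                     # particle core diameter
    :temp       => 293.0,                     # temperature in K
    :derivative => false,
    :model      => EquilibriumModel(),        # replaces the old :alg => ... selection
)

m = simulationMNP(B, t; p...)                 # expected: length(t) × 3 magnetization trajectory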