diff --git a/examples/01_parallel_mpo_sum_2d_hubbard_conserve_momentum.jl b/examples/01_parallel_mpo_sum_2d_hubbard_conserve_momentum.jl
index 21291e1..9a32178 100644
--- a/examples/01_parallel_mpo_sum_2d_hubbard_conserve_momentum.jl
+++ b/examples/01_parallel_mpo_sum_2d_hubbard_conserve_momentum.jl
@@ -19,18 +19,18 @@ Run with:
 ```julia
 # Sequential sum over MPOs.
 # Uses the default `Sum=SequentialSum`.
-main(; Nx=8, Ny=4, maxdim=1000);
-main(; Nx=8, Ny=4, maxdim=1000, threaded_blocksparse=true);
+main(; Nx=8, Ny=4, nsweeps=10, maxdim=1000);
+main(; Nx=8, Ny=4, nsweeps=10, maxdim=1000, threaded_blocksparse=true);
 
 # Threaded sum over MPOs.
-main(; Nx=8, Ny=4, maxdim=1000, Sum=ThreadedSum);
-main(; Nx=8, Ny=4, maxdim=1000, Sum=ThreadedSum, threaded_blocksparse=true);
+main(; Nx=8, Ny=4, nsweeps=10, maxdim=1000, Sum=ThreadedSum);
+main(; Nx=8, Ny=4, nsweeps=10, maxdim=1000, Sum=ThreadedSum, threaded_blocksparse=true);
 
 # Distributed sum over MPOs, where terms of the MPO
 # sum and their environments are stored, updated,
 # and applied remotely on a worker process.
-main(; Nx=8, Ny=4, maxdim=1000, Sum=DistributedSum);
-main(; Nx=8, Ny=4, maxdim=1000, Sum=DistributedSum, threaded_blocksparse=true);
+main(; Nx=8, Ny=4, nsweeps=10, maxdim=1000, Sum=DistributedSum);
+main(; Nx=8, Ny=4, nsweeps=10, maxdim=1000, Sum=DistributedSum, threaded_blocksparse=true);
 
 # Using write-to-disk.
 main(; Nx=8, Ny=4, maxdim=1000, Sum=DistributedSum, disk=true, threaded_blocksparse=true);
@@ -41,6 +41,7 @@ function main(;
   Ny::Int,
   U::Float64=4.0,
   t::Float64=1.0,
+  nsweeps=10,
   maxdim::Int=3000,
   conserve_ky=true,
   seed=1234,
@@ -58,7 +59,6 @@ function main(;
 
   N = Nx * Ny
 
-  nsweeps = 10
   max_maxdim = maxdim
   maxdim = min.([100, 200, 400, 800, 2000, 3000, max_maxdim], max_maxdim)
   cutoff = 1e-6
diff --git a/examples/02_mpi_mpo_sum_2d_hubbard_conserve_momentum.jl b/examples/02_mpi_mpo_sum_2d_hubbard_conserve_momentum.jl
index f5ac35f..f86efc1 100644
--- a/examples/02_mpi_mpo_sum_2d_hubbard_conserve_momentum.jl
+++ b/examples/02_mpi_mpo_sum_2d_hubbard_conserve_momentum.jl
@@ -12,11 +12,11 @@ ITensors.Strided.disable_threads()
 """
 Run at the command line with 4 processes:
 ```julia
-mpiexecjl -n 2 julia -t2 02_mpi_run.jl --Nx 8 --Ny 4 --maxdim 1000
+mpiexecjl -n 2 julia -t2 02_mpi_run.jl --Nx 8 --Ny 4 --nsweeps 10 --maxdim 1000
 
-mpiexecjl -n 2 julia -t2 02_mpi_run.jl --Nx 8 --Ny 4 --maxdim 1000 --threaded_blocksparse true
+mpiexecjl -n 2 julia -t2 02_mpi_run.jl --Nx 8 --Ny 4 --nsweeps 10 --maxdim 1000 --threaded_blocksparse true
 
-mpiexecjl -n 2 julia -t2 02_mpi_run.jl --Nx 8 --Ny 4 --maxdim 1000 --disk true --threaded_blocksparse true
+mpiexecjl -n 2 julia -t2 02_mpi_run.jl --Nx 8 --Ny 4 --nsweeps 10 --maxdim 1000 --disk true --threaded_blocksparse true
 ```
 """
 function main(;
@@ -24,6 +24,7 @@ function main(;
   Ny::Int,
   U::Float64=4.0,
   t::Float64=1.0,
+  nsweeps=10,
   maxdim::Int=3000,
   conserve_ky=true,
   seed=1234,
@@ -39,7 +40,6 @@ function main(;
 
   N = Nx * Ny
 
-  nsweeps = 10
   max_maxdim = maxdim
   maxdim = min.([100, 200, 400, 800, 2000, 3000, max_maxdim], max_maxdim)
   cutoff = 1e-6
diff --git a/examples/02_mpi_run.jl b/examples/02_mpi_run.jl
index 7a9e83e..55eb31e 100644
--- a/examples/02_mpi_run.jl
+++ b/examples/02_mpi_run.jl
@@ -2,7 +2,7 @@
 include("02_mpi_mpo_sum_2d_hubbard_conserve_momentum.jl")
 
 # Run with:
-# mpiexecjl -n 2 julia 02_mpi_run.jl --Nx 8 --Ny 4 --maxdim 1000
+# mpiexecjl -n 2 julia 02_mpi_run.jl --Nx 8 --Ny 4 --nsweeps 10 --maxdim 1000
 
 using ArgParse
 
@@ -16,6 +16,10 @@ function parse_commandline()
     help = "Cylinder width"
     arg_type = Int
     required = true
+    "--nsweeps"
+    help = "Number of sweeps"
+    arg_type = Int
+    required = true
     "--maxdim"
     help = "Maximum bond dimension"
     arg_type = Int
@@ -35,6 +39,7 @@ args = parse_commandline()
 main(;
   Nx=args["Nx"],
   Ny=args["Ny"],
+  nsweeps=args["nsweeps"],
   maxdim=args["maxdim"],
   disk=args["disk"],
   threaded_blocksparse=args["threaded_blocksparse"],
diff --git a/test/test_mpi_example.jl b/test/test_mpi_example.jl
index 8f8bf2e..6f74747 100644
--- a/test/test_mpi_example.jl
+++ b/test/test_mpi_example.jl
@@ -16,10 +16,11 @@ using Test
   nprocs = 2
   Nx = 8
   Ny = 4
+  nsweeps = 2
   maxdim = 20
   mpiexec() do exe # MPI wrapper
     run(
-      `$exe -n $(nprocs) $(Base.julia_cmd()) --threads $(Threads.nthreads()) $(joinpath(examples_dir, example_file)) --Nx $(Nx) --Ny $(Ny) --maxdim $(maxdim) --disk $(disk) --threaded_blocksparse $(threaded_blocksparse)`,
+      `$exe -n $(nprocs) $(Base.julia_cmd()) --threads $(Threads.nthreads()) $(joinpath(examples_dir, example_file)) --Nx $(Nx) --Ny $(Ny) --nsweeps $(nsweeps) --maxdim $(maxdim) --disk $(disk) --threaded_blocksparse $(threaded_blocksparse)`,
     )
   end
 end
diff --git a/test/test_sequential_threaded_distributed_example.jl b/test/test_sequential_threaded_distributed_example.jl
index e444ad3..e6d556f 100644
--- a/test/test_sequential_threaded_distributed_example.jl
+++ b/test/test_sequential_threaded_distributed_example.jl
@@ -11,6 +11,7 @@ using Test
   include(joinpath(examples_dir, example_file))
   Nx = 8
   Ny = 4
+  nsweeps = 2
   maxdim = 20
   Sums = (SequentialSum, ThreadedSum, DistributedSum)
   @testset "Sum type $Sum, threaded block sparse $threaded_blocksparse, write-to-disk $disk" for Sum in
@@ -21,7 +22,7 @@
 
     println(
       "\nRunning parallel test with $(Sum), threaded block sparse $threaded_blocksparse, write-to-disk $disk",
     )
-    main(; Nx, Ny, maxdim, Sum, disk, threaded_blocksparse)
+    main(; Nx, Ny, nsweeps, maxdim, Sum, disk, threaded_blocksparse)
   end
 end
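Note on how the new `nsweeps` keyword interacts with the examples' fixed seven-entry `maxdim` ramp: `nsweeps` can be smaller than the schedule (the tests use `nsweeps = 2`) or larger (the examples default to `nsweeps = 10`). Below is a minimal illustrative sketch, not part of the diff, assuming the `Sweeps`/`setmaxdim!` API from ITensors.jl:

```julia
using ITensors

# Same ramp as in the examples: grow maxdim over the early
# sweeps, capped at max_maxdim.
nsweeps = 10
max_maxdim = 1000
maxdim = min.([100, 200, 400, 800, 2000, 3000, max_maxdim], max_maxdim)
cutoff = 1e-6

# setmaxdim! repeats the last provided value once the schedule
# runs out, and ignores extra values when nsweeps is smaller,
# so any nsweeps composes safely with this fixed-length ramp.
sweeps = Sweeps(nsweeps)
setmaxdim!(sweeps, maxdim...)
setcutoff!(sweeps, cutoff)
@show sweeps
```

With `nsweeps = 2` only the first two entries (`maxdim = 100, 200`) are used, which keeps the test runs cheap; with `nsweeps = 10` the final `maxdim = 1000` is repeated for sweeps 7 through 10.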