Commit

Cleaning
castelletto1 committed May 16, 2024
1 parent f0e2131 commit 31d3ca9
Showing 2 changed files with 8 additions and 17 deletions.
23 changes: 7 additions & 16 deletions src/docs/JOSS/paper.bib
@@ -5,26 +5,17 @@ @article{Settgast:2017
volume = {41},
number = {5},
pages = {627-653},
keywords = {hydraulic fracture, fracture mechanics, geomechanics, high-performance computing},
doi = {10.1002/nag.2557},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/nag.2557},
eprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1002/nag.2557},
abstract = {Summary This paper describes a fully coupled finite element/finite volume approach for simulating field-scale hydraulically driven fractures in three dimensions, using massively parallel computing platforms. The proposed method is capable of capturing realistic representations of local heterogeneities, layering and natural fracture networks in a reservoir. A detailed description of the numerical implementation is provided, along with numerical studies comparing the model with both analytical solutions and experimental results. The results demonstrate the effectiveness of the proposed method for modeling large-scale problems involving hydraulically driven fractures in three dimensions. © 2016 The Authors. International Journal for Numerical and Analytical Methods in Geomechanics published by John Wiley \& Sons Ltd.},
year = {2017}
}


@article{Beckingsale:2019,
abstract = {Modern high-performance computing systems are diverse, with hardware designs ranging from homogeneous multi- core CPUs to GPU or FPGA accelerated systems. Achieving desir- able application performance often requires choosing a program- ming model best suited to a particular platform. For large codes used daily in production that are under continual development, architecture-specific ports are untenable. Maintainability re- quires single-source application code that is performance portable across a range of architectures and programming models. In this paper we describe RAJA, a portability layer that enables C++ applications to leverage various programming models, and thus architectures, with a single-source codebase. We describe preliminary results using RAJA in three large production codes at Lawrence Livermore National Laboratory, observing 17×, 13× and 12× speedup on GPU-only over CPU- only nodes with single-source application code in each case.},
author = {Beckingsale, David A. and Scogland, Thomas R.W. and Burmark, Jason and Hornung, Rich and Jones, Holger and Killian, William and Kunen, Adam J. and Pearce, Olga and Robinson, Peter and Ryujin, Brian S.},
doi = {10.1109/P3HPC49587.2019.00012},
file = {:Users/settgast1/Documents/Mendeley Desktop/Beckingsale et al/2019/RAJA Portable Performance for Large-Scale Scientific Applications/Beckingsale et al. - 2019 - RAJA Portable Performance for Large-Scale Scientific Applications.pdf:pdf},
isbn = {9781728160030},
journal = {Proceedings of P3HPC 2019: International Workshop on Performance, Portability and Productivity in HPC - Held in conjunction with SC 2019: The International Conference for High Performance Computing, Networking, Storage and Analysis},
pages = {71--81},
title = {{RAJA: Portable Performance for Large-Scale Scientific Applications}},
year = {2019}
}
@InProceedings{Beckingsale:2019,
author={Beckingsale, David A. and Burmark, Jason and Hornung, Rich and Jones, Holger and Killian, William and Kunen, Adam J. and Pearce, Olga and Robinson, Peter and Ryujin, Brian S. and Scogland, Thomas R. W.},
booktitle={2019 IEEE/ACM International Workshop on Performance, Portability and Productivity in HPC (P3HPC)},
title={RAJA: Portable Performance for Large-Scale Scientific Applications},
year={2019},
pages={71-81},
doi={10.1109/P3HPC49587.2019.00012}}

@misc{CHAI:2023,
title = {CHAI},
2 changes: 1 addition & 1 deletion src/docs/JOSS/paper.md
@@ -123,7 +123,7 @@ The coupled physics package is often responsible for providing the specific cont

To solve these linear systems, GEOS maintains a generic linear algebra interface (LAI) capable of wrapping various linear algebra packages such as hypre [@hypre], PETSc[@petsc-web-page], and Trilinos[@trilinos-website].
Currently only the hypre interface is actively maintained.
For multi-physics problems involving the solution of a coupled linear system, GEOS relies on hypre's implementation a multi-grid reduction preconditioning strategy as presented by [@BUI:2020;@BUI:2021114111].
For multi-physics problems involving the solution of a coupled linear system, GEOS currently relies on a multigrid reduction preconditioning strategy available in hypre [@BUI:2020;@BUI:2021114111].

The performance portability strategy utilized by GEOS applies LLNL's suite of portability tools RAJA[@Beckingsale:2019], CHAI[@CHAI:2023], and Umpire[@Beckingsale:2020].
The RAJA performance portability layer provides portable kernel launching and wrappers for reductions, atomics, and local/shared memory to achieve performance on both CPU and GPU hardware.
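
As an illustration of the kernel-launch and reduction wrappers mentioned above, here is a minimal sketch using RAJA's public `forall` and `ReduceSum` interfaces. It is not taken from GEOS itself, and the sequential execution and reduction policies are stand-ins that would be swapped for CUDA or OpenMP policies on accelerated hardware.

```cpp
// Minimal RAJA sketch, assuming a standard RAJA installation; names follow
// RAJA's public forall/ReduceSum interfaces, not any GEOS-specific wrapper.
#include "RAJA/RAJA.hpp"
#include <vector>
#include <cstdio>

int main()
{
  const int N = 1000;
  std::vector<double> a(N, 1.0), b(N, 2.0);
  double* pa = a.data();
  const double* pb = b.data();

  // Portable kernel launch: the loop body is a lambda, and the execution
  // policy (here RAJA::seq_exec) selects the backend. Swapping the policy,
  // e.g. to a CUDA policy, retargets the same kernel to GPU hardware.
  RAJA::forall<RAJA::seq_exec>(RAJA::RangeSegment(0, N),
    [=](RAJA::Index_type i) { pa[i] += pb[i]; });

  // Portable reduction wrapper: ReduceSum accumulates correctly whether the
  // loop runs sequentially, across CPU threads, or on a GPU.
  RAJA::ReduceSum<RAJA::seq_reduce, double> sum(0.0);
  RAJA::forall<RAJA::seq_exec>(RAJA::RangeSegment(0, N),
    [=](RAJA::Index_type i) { sum += pa[i]; });

  std::printf("sum = %f\n", sum.get());
  return 0;
}
```

The key design point is that the loop body is written once as a lambda and the execution policy is the only thing that changes between architectures, which is what allows a single-source codebase.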
