% LaTeX support: [email protected]
% For support, please attach all files needed for compiling as well as the log file, and specify your operating system, LaTeX version, and LaTeX editor.
%=================================================================
\documentclass[asi,article,submit,moreauthors]{Definitions/mdpi}
\usepackage{caption}
\usepackage{subcaption}
\usepackage{amsmath}
\usepackage{siunitx}
\usepackage{bbold}
\usepackage{vcell}
\usepackage{tabularx}
\usepackage{tabularray}
\usepackage[compatibility=false]{caption}
\usepackage[frozencache=true]{minted}
\usepackage{pgfplots}
\usepgfplotslibrary{colorbrewer}
\usepackage{pgfplotstable}
\usepgfplotslibrary{statistics}
% initialize Set1-4 from colorbrewer (we're comparing 4 classes),
\pgfplotsset{compat = 1.15, cycle list/Set1-8}
% Tikz is loaded automatically by pgfplots
\usetikzlibrary{pgfplots.statistics, pgfplots.colorbrewer}
% provides \pgfplotstabletranspose
\usepackage{pgfplotstable}
\setcounter{MaxMatrixCols}{20}
\usetikzlibrary{shapes.geometric, arrows, backgrounds, positioning, fit}
\newcommand\ppbb{path picture bounding box}
\tikzstyle{every node}=[font=\small]
\tikzstyle{startstop} = [rectangle, rounded corners, minimum width=3cm, minimum height=1cm, text centered, draw=black, fill=red!30]
\tikzstyle{io} = [trapezium, trapezium left angle=70, trapezium right angle=110, minimum height=1cm, text centered, text width = 2cm, draw=black, fill=blue!30]
\tikzstyle{res} = [trapezium, trapezium left angle=70, trapezium right angle=110, minimum width=3cm, minimum height=1cm, text centered, text width = 2cm, draw=black, fill=green!30]
\tikzstyle{process} = [rectangle, minimum width=3cm, minimum height=1cm, text centered, text width = 2cm, draw=black, fill=orange!30]
\tikzstyle{element} = [rectangle, minimum width=3cm, minimum height=1cm, text centered, text width = 2.5cm, draw=black, fill=black!30]
\tikzstyle{description} = [rectangle, minimum width=3cm, minimum height=1cm, text centered, text width = 3cm, fill opacity=0, text opacity=1, align=left]
\tikzstyle{decision} = [diamond, aspect=1.5, minimum width=3cm, minimum height=1cm, text centered, text width = 2cm, draw=black, fill=white]
\tikzstyle{arrow} = [thick,->,>=stealth]
\tikzstyle{subprocess} = [rectangle, draw=black, fill=orange!30,
minimum width=3cm, minimum height=1cm,inner xsep=3mm,
text width =\pgfkeysvalueof{/pgf/minimum width}-2*\pgfkeysvalueof{/pgf/inner xsep},
align=flush center,
path picture={\draw
([xshift =2mm] \ppbb.north west) -- ([xshift= 2mm] \ppbb.south west)
([xshift=-2mm] \ppbb.north east) -- ([xshift=-2mm] \ppbb.south east);
}]
%--------------------
% Class Options:
%--------------------
%----------
% journal
%----------
% Choose between the following MDPI journals:
% acoustics, actuators, addictions, admsci, adolescents, aerobiology, aerospace, agriculture, agriengineering, agrochemicals, agronomy, ai, air, algorithms, allergies, alloys, analytica, analytics, anatomia, animals, antibiotics, antibodies, antioxidants, applbiosci, appliedchem, appliedmath, applmech, applmicrobiol, applnano, applsci, aquacj, architecture, arm, arthropoda, arts, asc, asi, astronomy, atmosphere, atoms, audiolres, automation, axioms, bacteria, batteries, bdcc, behavsci, beverages, biochem, bioengineering, biologics, biology, biomass, biomechanics, biomed, biomedicines, biomedinformatics, biomimetics, biomolecules, biophysica, biosensors, biotech, birds, bloods, blsf, brainsci, breath, buildings, businesses, cancers, carbon, cardiogenetics, catalysts, cells, ceramics, challenges, chemengineering, chemistry, chemosensors, chemproc, children, chips, cimb, civileng, cleantechnol, climate, clinpract, clockssleep, cmd, coasts, coatings, colloids, colorants, commodities, compounds, computation, computers, condensedmatter, conservation, constrmater, cosmetics, covid, crops, cryptography, crystals, csmf, ctn, curroncol, cyber, dairy, data, ddc, dentistry, dermato, dermatopathology, designs, devices, diabetology, diagnostics, dietetics, digital, disabilities, diseases, diversity, dna, drones, dynamics, earth, ebj, ecologies, econometrics, economies, education, ejihpe, electricity, electrochem, electronicmat, electronics, encyclopedia, endocrines, energies, eng, engproc, entomology, entropy, environments, environsciproc, epidemiologia, epigenomes, est, fermentation, fibers, fintech, fire, fishes, fluids, foods, forecasting, forensicsci, forests, foundations, fractalfract, fuels, future, futureinternet, futurepharmacol, futurephys, futuretransp, galaxies, games, gases, gastroent, gastrointestdisord, gels, genealogy, genes, geographies, geohazards, geomatics, geosciences, geotechnics, geriatrics, grasses, gucdd, hazardousmatters, healthcare, hearts, hemato, hematolrep, heritage, higheredu, highthroughput, histories, horticulturae, hospitals, humanities, humans, hydrobiology, hydrogen, hydrology, hygiene, idr, ijerph, ijfs, ijgi, ijms, ijns, ijpb, ijtm, ijtpp, ime, immuno, informatics, information, infrastructures, inorganics, insects, instruments, inventions, iot, j, jal, jcdd, jcm, jcp, jcs, jcto, jdb, jeta, jfb, jfmk, jimaging, jintelligence, jlpea, jmmp, jmp, jmse, jne, jnt, jof, joitmc, jor, journalmedia, jox, jpm, jrfm, jsan, jtaer, jvd, jzbg, kidneydial, kinasesphosphatases, knowledge, land, languages, laws, life, liquids, literature, livers, logics, logistics, lubricants, lymphatics, machines, macromol, magnetism, magnetochemistry, make, marinedrugs, materials, materproc, mathematics, mca, measurements, medicina, medicines, medsci, membranes, merits, metabolites, metals, meteorology, methane, metrology, micro, microarrays, microbiolres, micromachines, microorganisms, microplastics, minerals, mining, modelling, molbank, molecules, mps, msf, mti, muscles, nanoenergyadv, nanomanufacturing, nanomaterials, ncrna, ndt, network, neuroglia, neurolint, neurosci, nitrogen, notspecified, %%nri, nursrep, nutraceuticals, nutrients, obesities, oceans, ohbm, onco, %oncopathology, optics, oral, organics, organoids, osteology, oxygen, parasites, parasitologia, particles, pathogens, pathophysiology, pediatrrep, pharmaceuticals, pharmaceutics, pharmacoepidemiology, pharmacy, philosophies, photochem, photonics, phycology, physchem,
% physics, physiologia, plants, plasma, platforms, pollutants, polymers, polysaccharides, poultry, powders, preprints, proceedings, processes, prosthesis, proteomes, psf, psych, psychiatryint, psychoactives, publications, quantumrep, quaternary, qubs, radiation, reactions, receptors, recycling, regeneration, religions, remotesensing, reports, reprodmed, resources, rheumato, risks, robotics, ruminants, safety, sci, scipharm, sclerosis, seeds, sensors, separations, sexes, signals, sinusitis, skins, smartcities, sna, societies, socsci, software, soilsystems, solar, solids, spectroscj, sports, standards, stats, std, stresses, surfaces, surgeries, suschem, sustainability, symmetry, synbio, systems, targets, taxonomy, technologies, telecom, test, textiles, thalassrep, thermo, tomography, tourismhosp, toxics, toxins, transplantology, transportation, traumacare, traumas, tropicalmed, universe, urbansci, uro, vaccines, vehicles, venereology, vetsci, vibration, virtualworlds, viruses, vision, waste, water, wem, wevj, wind, women, world, youth, zoonoticdis
% For posting an early version of this manuscript as a preprint, you may use "preprints" as the journal. Changing "submit" to "accept" before posting will remove line numbers.
%---------
% article
%---------
% The default type of manuscript is "article", but can be replaced by:
% abstract, addendum, article, book, bookreview, briefreport, casereport, comment, commentary, communication, conferenceproceedings, correction, conferencereport, entry, expressionofconcern, extendedabstract, datadescriptor, editorial, essay, erratum, hypothesis, interestingimage, obituary, opinion, projectreport, reply, retraction, review, perspective, protocol, shortnote, studyprotocol, systematicreview, supfile, technicalnote, viewpoint, guidelines, registeredreport, tutorial
% supfile = supplementary materials
%----------
% submit
%----------
% The class option "submit" will be changed to "accept" by the Editorial Office when the paper is accepted. This will only make changes to the frontpage (e.g., the logo of the journal will get visible), the headings, and the copyright information. Also, line numbering will be removed. Journal info and pagination for accepted papers will also be assigned by the Editorial Office.
%------------------
% moreauthors
%------------------
% If there is only one author the class option oneauthor should be used. Otherwise use the class option moreauthors.
%---------
% pdftex
%---------
% The option pdftex is for use with pdfLaTeX. Remove "pdftex" for (1) compiling with LaTeX & dvi2pdf (if eps figures are used) or for (2) compiling with XeLaTeX.
%=================================================================
% MDPI internal commands - do not modify
\firstpage{1}
\makeatletter
\setcounter{page}{\@firstpage}
\makeatother
\pubvolume{1}
\issuenum{1}
\articlenumber{0}
\pubyear{2024}
\copyrightyear{2024}
%\externaleditor{Academic Editor: Firstname Lastname}
\datereceived{ }
\daterevised{ } % Comment out if no revised date
\dateaccepted{ }
\datepublished{ }
%\datecorrected{} % For corrected papers: "Corrected: XXX" date in the original paper.
%\dateretracted{} % For corrected papers: "Retracted: XXX" date in the original paper.
\hreflink{https://doi.org/} % If needed use \linebreak
%\doinum{}
%\pdfoutput=1 % Uncommented for upload to arXiv.org
%=================================================================
% Add packages and commands here. The following packages are loaded in our class file: fontenc, inputenc, calc, indentfirst, fancyhdr, graphicx, epstopdf, lastpage, ifthen, float, amsmath, amssymb, lineno, setspace, enumitem, mathpazo, booktabs, titlesec, etoolbox, tabto, xcolor, colortbl, soul, multirow, microtype, tikz, totcount, changepage, attrib, upgreek, array, tabularx, pbox, ragged2e, tocloft, marginnote, marginfix, enotez, amsthm, natbib, hyperref, cleveref, scrextend, url, geometry, newfloat, caption, draftwatermark, seqsplit
% cleveref: load \crefname definitions after \begin{document}
%=================================================================
% Please use the following mathematics environments: Theorem, Lemma, Corollary, Proposition, Characterization, Property, Problem, Example, ExamplesandDefinitions, Hypothesis, Remark, Definition, Notation, Assumption
%% For proofs, please use the proof environment (the amsthm package is loaded by the MDPI class).
%=================================================================
% Full title of the paper (Capitalized)
\Title{An Application Driven Method for Assembling Numerical Schemes for the Solution of Complex Multiphysics Problems}
% MDPI internal command: Title for citation in the left column
\TitleCitation{An Application Driven Method for Assembling Numerical Schemes for the Solution of Complex Multiphysics Problems}
% Author Orchid ID: enter ID or remove command
\newcommand{\orcidauthorA}{0000-0003-3108-3171} % Add \orcidA{} behind the author's name
\newcommand{\orcidauthorB}{0000-0002-2799-9075} % Add \orcidB{} behind the author's name
% Authors, for the paper (add full first names)
\Author{Patrick Zimbrod $^{1,\dagger}$\orcidA{}, Michael Fleck $^{2}$\orcidB{} and Johannes Schilp $^{1}$*}
%\longauthorlist{yes}
% MDPI internal command: Authors, for metadata in PDF
\AuthorNames{Patrick Zimbrod, Michael Fleck and Johannes Schilp}
% MDPI internal command: Authors, for citation in the left column
\AuthorCitation{Zimbrod, P.; Fleck, M.; Schilp, J.}
% If this is a Chicago style journal: Lastname, Firstname, Firstname Lastname, and Firstname Lastname.
% Affiliations / Addresses (Add [1] after \address if there is only one affiliation.)
\address{%
$^{1}$ \quad Applied Computer Science, University of Augsburg\\
$^{2}$ \quad Metals and Alloys, University of Bayreuth}
% Contact information of the corresponding author
\corres{Correspondence: [email protected]}
% Current address and/or shared authorship
% The commands \thirdnote{} till \eighthnote{} are available for further notes
%\simplesumm{} % Simple summary
%\conference{} % An extended version of a conference paper
% Abstract (Do not insert blank lines, i.e. \\)
\abstract{Within recent years, considerable progress has been made regarding high-performance solvers for Partial Differential Equations (PDEs), yielding potential gains in efficiency compared to industry standard tools. However, the latter largely remains the status quo for scientists and engineers focusing on applying simulation tools to specific problems in practice.
We attribute this growing technical gap to the increasing complexity and knowledge required to pick and assemble state-of-the-art methods.
Thus, with this work, we initiate an effort to build a common taxonomy for the most popular grid-based approximation schemes to draw comparisons regarding accuracy and computational efficiency.
We then build upon this foundation and introduce a method to systematically guide an application expert through classifying a given PDE problem setting and identifying a suitable numerical scheme.
Great care is taken to ensure that making a choice this way is unambiguous, i.e. the goal is to obtain a clear and reproducible recommendation.
Our method not only helps to identify and assemble suitable schemes but enables the unique combination of multiple methods on a per-field basis.
We demonstrate this process and its effectiveness using different model problems, each comparing the resulting numerical scheme from our method with the next best choice.
For both the Allen--Cahn and advection equations, we show that substantial gains in accuracy and efficiency can be attained with the recommended numerical methods.
Lastly, we outline how one can systematically analyze and classify a coupled multiphysics problem of considerable complexity with 6 different unknown quantities, yielding an efficient, mixed discretization whose configuration compares well to high-performance implementations from the literature.}
% Keywords
\keyword{simulation; multiphysics; finite difference; finite volume; finite element; discontinuous Galerkin}
% The fields PACS, MSC, and JEL may be left empty or commented out if not applicable
%\PACS{J0101}
%\MSC{}
%\JEL{}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Only for the journal Diversity
%\LSID{\url{http://}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Only for the journal Applied Sciences
%\featuredapplication{Authors are encouraged to provide a concise description of the specific application or a potential application of the work. This section is not mandatory.}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Only for the journal Data
%\dataset{DOI number or link to the deposited data set if the data set is published separately. If the data set shall be published as a supplement to this paper, this field will be filled by the journal editors. In this case, please submit the data set as a supplement.}
%\datasetlicense{License under which the data set is made available (CC0, CC-BY, CC-BY-SA, CC-BY-NC, etc.)}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Only for the journal Toxins
%\keycontribution{The breakthroughs or highlights of the manuscript. Authors can write one or two sentences to describe the most important part of the paper.}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Only for the journal Encyclopedia
%\encyclopediadef{For entry manuscripts only: please provide a brief overview of the entry title instead of an abstract.}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Only for the journal Advances in Respiratory Medicine
%\addhighlights{yes}
%\renewcommand{\addhighlights}{%
%\noindent This is an obligatory section in “Advances in Respiratory Medicine”, whose goal is to increase the discoverability and readability of the article via search engines and other scholars. Highlights should not be a copy of the abstract, but a simple text allowing the reader to quickly and simplified find out what the article is about and what can be cited from it. Each of these parts should be devoted up to 2~bullet points.\vspace{3pt}\\
%\textbf{What are the main findings?}
% \begin{itemize}[labelsep=2.5mm,topsep=-3pt]
% \item First bullet.
% \item Second bullet.
% \end{itemize}\vspace{3pt}
%\textbf{What is the implication of the main finding?}
% \begin{itemize}[labelsep=2.5mm,topsep=-3pt]
% \item First bullet.
% \item Second bullet.
% \end{itemize}
%}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\endnote{This is an endnote.} % To use endnotes, please un-comment \printendnotes below (before References). Only journal Laws uses \footnote.
% The order of the section titles is different for some journals. Please refer to the "Instructions for Authors” on the journal homepage.
\section{Introduction}\label{sec:introduction}
Within the discipline of accurately and efficiently modeling physical processes that are described by Partial Differential Equations (PDEs), one is confronted with an ever-increasing number of numerical methods to choose from.
To an application-oriented expert, that is, an engineer or scientist who is familiar with their respective scientific domain but not necessarily with the numerics of PDE approximation, this abundance of choices may quickly appear daunting given the growing body of research in the numerics community.
This trend can be observed in particular within the Finite Element framework, which has grown to be one of the most widely used and also formally investigated methods in this field.
Modern, general and mathematically rigorous implementations of the Finite Element Method (FEM), such as \texttt{deal.II}\,\cite{bangerthDealIIGeneralpurpose2007}, \texttt{FEniCS}\,\cite{AlnaesEtal2015}, \texttt{Firedrake}\,\cite{FiredrakeUserManual} or \texttt{MFEM}\,\cite{andersonMFEMModularFinite2021a}, offer a wide variety of formulations that one can choose from, with varying degrees of customizability.
The number of different options is best illustrated by considering the various Finite Elements and corresponding function spaces that the application expert typically can and must choose from nowadays.
For instance, the popular website DefElement, which summarises a vast number of Finite Element types along with their characteristics and shape functions, lists over 45 different choices of element for approximating scalar quantities and over 40 for vector quantities\,\cite{scroggsDefElementEncyclopediaFinite2023}.
These elements are, for the most part, readily implemented in the abovementioned software libraries.
However, not all of them necessarily produce good or even stable approximations for any given PDE problem \cite{brezziDiscourseStabilityConditions1990}.
Even if one were to have prior knowledge on a good choice of function space, e.g. $H(\textrm{div})$ to approximate divergence-free quantities\,\cite{johnDivergenceConstraintMixed2017}, one would still have to choose between more than 20 different kinds of Finite Element.
This exemplifies the large flexibility and sophistication of the FEM that has evolved in recent years, but consequently also the ambiguity of choice that these developments bring along.
The outlined problem of choosing the right tool for a given application naturally becomes even more of an issue when other tools that are standard in practice, such as Finite Difference and Finite Volume methods, are additionally considered.
These recent advancements are, however, in contrast to established numerical techniques that are oftentimes still used as a standard in practice.
For example, the Finite Volume method, although relying on a rather old and simple concept, is still widely considered the standard practice for solving problems in Computational Fluid Dynamics\,\cite{shademanEvaluationOpenFOAMAcademic2013}.
In contrast, there have been several recent developments based on the Discontinuous Galerkin method that have been shown to perform noticeably better for fluid dynamics problems \cite{zhouNumericalComparisonWENO2001}.
We thus note that a gap has emerged between industry standard tools and modern, high-performance numerical methods that we attribute to the increasing complexity and insight required to properly assemble the latter.
Additionally, apart from these outlined recent developments, choosing an approximation method that offers the right amount and type of degrees of freedom is essential for obtaining a solution that can be efficiently computed.
That is, a numerical scheme should be tailored to a specific problem at hand to achieve good convergence and stability properties.
Whereas the former might be negligible in practice, since achieving the utmost performance is not always important, having a stable approximation is paramount.
One must hence make use of the specific properties of a PDE problem to yield a good approximation.
This line of reasoning not only applies to problems governed by a single PDE but even more so to multiple equations forming a system.
We will in the following denote such problems as multiphysics problems.
These oftentimes describe processes that are either distinct in their qualitative behavior or even operate on different length scales.
As a result, using one discretization method in a monolithic way will not perform equally well for each PDE of that system, creating bottlenecks regarding either stability or accuracy and thus further complicating the question of which specific method is best suited for the job.
On the other hand, using multiple numerical methods within one multiphysics problem requires interoperability of all discretizations.
Coupling different solvers has proven to quickly become tedious in terms of implementation and may even introduce severe bottlenecks due to large amounts of data transfer.
Having a common formulation for some schemes is thus desirable, such that one may recover specific methods by imposing abstractions, for example by omitting some steps in assembling a global linear system.
In this work, we make an initial effort towards closing the outlined, increasing gap between state-of-the-art research in the numerics community and best practices in applications.
As covering the entirety of numerical methods available is impossible in practice, especially for non-experts in every single aspect, we initially restrict the scope of view to a rather narrow subset of methods.
The added benefit of this approach is that this enables us to remove methods that would otherwise render making a problem-oriented choice largely ambiguous.
The contributions of this work to address the abovementioned problems are twofold:
First, we propose a unifying approximation taxonomy that enables recovering the most prevalent grid-based numerical methods by imposing some well-defined abstractions, albeit with a relatively narrow scope for now.
Secondly, we propose a generalized framework for choosing an appropriate, that is, stable and performant numerical scheme given a fixed set of inputs.
These include the system of PDEs, the triangulation where the problem is defined as well as the available computing hardware.
As such a method necessitates a unified view of all the numerical methods considered to enable quantifiable comparisons, this heavily builds on the first part of this article.
In combination, this enables an application expert to make an informed choice on which scheme to use on a per-field basis given some equally well-defined inputs.
Since, in an application setting, stability is typically more important than convergence rate, we focus our effort on providing methods that produce stable solutions but do not fall too far behind the most performant alternatives.
The remainder of this work is thus structured as follows:
We first present a brief review of works in the literature that are concerned with comparing the mentioned numerical schemes which we subsequently build upon.
The theory necessary to construct such a baseline will be covered in the following sections.
We will then proceed by analyzing typically given inputs for a PDE problem and assemble a method to systematically derive suitable numerical schemes.
The results of choosing and implementing numerical methods according to our developed framework will be demonstrated afterward.
We will investigate two distinct benchmark problems, each comparing two methods that would be closest to being optimal in that specific case.
Special attention will be given to accuracy concerning the analytical solution as well as computational complexity and performance.
Finally, we will demonstrate the effectiveness of the presented framework by assessing a complex and currently relevant multiphysics problem.
We show how to systematically arrive at a mixed choice of discretization schemes using the proposed decision method. Finally, we indicate some relevant works in the literature that employ similar approximations, highlighting the validity and relevance of this work.
\section{Previous Works}
In this section, we outline some prior efforts aimed at comparing different grid-based numerical methods or drawing connections between them.
Besides the works mentioned henceforth, there exists a vast amount of literature that is concerned with the unique properties of each method, which we omit here for the sake of brevity.
In the literature, there are rather few works that are concerned with drawing connections between different grid-based approximation schemes.
Some authors rigorously showed the equivalence of the Finite Volume method to either Mixed Finite Element \cite{barangerConnectionFiniteVolume1996} or Petrov Galerkin Finite Element Methods \cite{yeRelationshipFiniteVolume2001,idelsohnFiniteVolumesFinite1994}.
With regard to the Finite Difference and Finite Element Methods, Thomée showed early on that the FEM can be understood as a somewhat equivalent, yet generalized variant of taking Finite Differences on arbitrary grids \cite{thomeeFiniteDifferenceFinite1984}.
Some general differences between these schemes were outlined by Key and Krieg \cite{keyComparisonFiniteElementFiniteDifference1973}.
In the work of Shu, some analogies have been brought up between Finite Volume and Finite Difference schemes in WENO formulation \cite{shuHighorderFiniteDifference2003}.
A theoretical and numerical comparison between higher-order Finite Volume and Discontinuous Galerkin Methods was conducted by Zhou et al. \cite{zhouNumericalComparisonWENO2001}.
Additionally, Dumbser et al. constructed a unifying framework to accommodate high-order Finite Volume and Discontinuous Galerkin schemes \cite{dumbserUnifiedFrameworkConstruction2008a}.
In the context of elliptic PDEs, Lin et al. present a theoretical and empirical comparison between the comparably new weak Galerkin, Discontinuous Galerkin and mixed Finite Element schemes \cite{linComparativeStudyWeak2015a}.
A comparative study between Discontinuous Galerkin and the Streamline Upwind Petrov Galerkin method for flow problems can be found in \cite{yurunComparativeStudyDiscontinuous1997}.
These works in summary draw point-wise comparisons between some grid-based approximation schemes.
Despite being quite useful for discerning individual advantages and disadvantages for a given application, one may still lack an understanding of the general properties.
Furthermore, Bui-Thanh has presented an encompassing analysis and application of the Hybridizable Discontinuous Galerkin Method (HDG) to solve a wide variety of PDE-governed problems.
It was thereby shown that this numerical scheme is general and powerful enough to form a unified baseline \cite{bui-thanhGodunovUnifiedHybridized2015}.
In addition, due to the generality of this method, there have been works that attempt to benchmark DG methods to more conventional and widely adopted Continuous Galerkin methods (CG) \cite{yakovlevCGHDGComparative2016,kronbichlerPerformanceComparisonContinuous2018,kirbyCGHDGComparative2012}.
Some authors proposed combinations of numerical schemes that operate optimally to solve hyperbolic \cite{gaburroUnifiedFrameworkSolution2021} or parabolic \cite{yangOptimallyEfficientTechnique2017} systems of PDEs.
In summary, we draw the following conclusion from this brief review of the relevant literature.
To date, there exist only a few comparisons between grid-based approximation schemes that outline common properties, mostly in an ad hoc or point-wise manner.
We have, however, outlined in the previous section that it would be beneficial from an application-oriented perspective to have an encompassing taxonomy for these numerical schemes.
Furthermore, many different variants of numerical schemes have been proposed to tackle a wide variety of PDE problems.
What appears to be missing, though, is a general guideline on how to choose between these vast alternatives to obtain a method, or possibly a combination of different methods, to solve a system in a stable (above all) and reasonably efficient way.
\section{Theoretical Baseline}\label{sec:baseline}
The general procedure of this section is as follows.
We first introduce the most general scheme considered here, which is the Discontinuous Galerkin Method.
Then, for each additional scheme considered, we individually work out the necessary simplifications to arrive at that numerical method starting at the DGM.
In the remaining sections of this article, we use underline notation ($\underline{\cdot},\,\underline{\underline{\cdot}}$) to indicate vectors and matrices and roman indices ($i,j$) to denote elements of lists or arrays on the computational level.
We assume the reader of this article to have a coarse overview of the presented methods, but not much insight into the specifics of each. We thus present the necessary theory in a comparably coarse manner that focuses on the qualitative characteristics.
\subsection{Discontinuous Galerkin Method}
This method was originally proposed in 1973 to solve challenging hyperbolic transport equations in nuclear physics \cite{reedTriangularMeshMethods1973}.
In spirit, it can be held as a synthesis of Finite Element and Finite Volume schemes and poses a generalized variant of both.
To derive such a scheme, we begin by stating the strong form of a given PDE. The most straightforward example in this case would be a first-order linear advection equation with a homogeneous Neumann boundary condition:
\begin{equation} \label{eq:advection-strong}
\partial_t \alpha + \underline{u} \cdot \nabla \alpha = 0, \qquad \frac{\partial \alpha}{\partial n} = 0 \quad\forall x \in \partial \Omega
\end{equation}
where $\partial_t$ signifies the temporal derivative, $x$ is the set of spatial coordinates, $\partial \Omega$ denotes the boundary of the computational domain $\Omega$ and $n$ is the unit normal with respect to $\partial \Omega$.
In this case, and henceforth in this article, we assume for reasons of simplicity that the velocity field is divergence-free, i.e. $\nabla \cdot \underline{u} = 0$.
We now state the weak form of Eq. \ref{eq:advection-strong}, that is, we multiply by a test function $v$, integrate over the entirety of the domain $\Omega$, and apply partial integration to the second term on the left-hand side that contains the nabla operator.
For a divergence free velocity field, one may set $\underline{u} \cdot \nabla \alpha = \nabla \cdot \left(\underline{u}\alpha\right)$, which results in the following formulation: Find $\alpha \in V$ such that:
\begin{equation}\label{eq:advection-weak-dgm}
\int_\Omega v \cdot \partial_t \alpha \,dx + \int_{\partial \Omega} v \alpha (\underline{u}\cdot \underline{n}) \,ds - \int_\Omega \nabla v \cdot \underline{u} \alpha \,dx = 0 \qquad \forall v \in W
\end{equation}
Here, we need to make an appropriate choice for the solution space $V$ and the test space $W$, which may, but do not need to, differ from each other.
Due to partial integration, we now encounter an additional term that has to be integrated over the domain boundary $\partial \Omega$, where $(\underline{u}\cdot \underline{n})$ denotes the velocity component normal to the boundary.
To make such a problem solvable by a computer, one must additionally choose the discretization of the solution space $V$, denoted $V_h$.
A particularly popular choice of space is the set of Lagrange polynomials.
In addition, the physical space must be discretized in the form of a triangulation.
The DG scheme then consists of assembling the finite-dimensional, linear system on the element level.
This enables high locality of the solution process, which leads to efficient computation on parallel architectures as less data transfer is required.
One resulting key feature of the DG scheme is that the elements now do not overlap anymore in terms of their degrees of freedom.
Thus, the global problem is broken up into individual problems.
This in general leads to large systems that are, however, sparse and, in the case of the mass matrix, even block diagonal.
The remaining boundary term, often denoted the numerical flux, is the only term that ensures coupling across elements within the physical domain.
Through evaluation of this surface integral, adjacent degrees of freedom are coupled and thus global conservation of quantities can be assured.
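To make the above steps concrete, the weak form of Eq. \ref{eq:advection-weak-dgm} together with an upwind numerical flux can be written almost verbatim in the UFL notation shared by FEniCS and Firedrake \cite{AlnaesEtal2015,FiredrakeUserManual}. The following sketch is purely illustrative and is not part of the implementations discussed later; the mesh, the velocity field and the particular choice of upwind flux are placeholder assumptions.
\begin{minted}{python}
from firedrake import *  # FEniCS/UFL syntax is largely analogous

mesh = UnitSquareMesh(32, 32)             # placeholder triangulation
V = FunctionSpace(mesh, "DG", 1)          # discontinuous, piecewise linear space
u = as_vector([1.0, 0.5])                 # prescribed, divergence-free velocity
alpha, v = TrialFunction(V), TestFunction(V)
n = FacetNormal(mesh)

un = 0.5 * (dot(u, n) + abs(dot(u, n)))   # upwind (outflow) part of u . n

# element-interior contribution of Eq. (advection-weak-dgm)
a_volume = -alpha * dot(u, grad(v)) * dx
# interior-facet numerical flux: the only coupling between neighbouring cells
a_flux = (v('+') - v('-')) * (un('+') * alpha('+') - un('-') * alpha('-')) * dS
# boundary part of the hull integral, kept in upwind form (outflow only)
a_boundary = v * un * alpha * ds

a = a_volume + a_flux + a_boundary        # spatial operator
m = v * alpha * dx                        # block-diagonal DG mass matrix
\end{minted}
Note that the interior-facet term assembled over $dS$ is the only place where neighboring cells exchange information, mirroring the discussion above.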
As the polynomial space of DG schemes only belongs to the $L^2$ space of functions but not to $H^1$, the basis functions are discontinuous and thus the derivative at element boundaries is not well defined.
Solving PDEs involving second derivatives is thus not possible as is.
As a consequence, there have been many successful extensions of this method to circumvent that problem. At this point, we name the most prevalent schemes, namely the Symmetric Interior Penalty \cite{epshteynSymmetricInteriorPenalty}, Hybridizable \cite{warburtonDiscontinuousGalerkinMethod1999} and Local DG scheme \cite{cockburnLocalDiscontinuousGalerkin1998}.
These methods, despite having different approaches, have been extensively studied and compared to each other \cite{arnoldUnifiedAnalysisDiscontinuous2002}.
As it turns out, all methods work well and have individual advantages and disadvantages.
For this work, we will take the Hybridizable DG scheme as a general framework.
We note here that the proposed method would however work with any of the other schemes given above.
Within the abovementioned methods, one introduces an additional term in the weak form that serves as a penalty for discontinuous solutions.
An alternative approach that is also pursued within the HDG scheme is the algebraic manipulation of the PDE system by splitting.
One recursively introduces new dependent variables for quantities that appear in higher-order derivatives such that each quantity is differentiated at most once.
We illustrate this using the Poisson equation:
\begin{equation}
\Delta u = 0
\end{equation}
The corresponding, well-known weak form is: Find $u \in V$, such that for all $v \in V$
\begin{equation}\label{eq:poisson-weakform}
\int_{\partial\Omega} v (\nabla u \cdot \underline{n}) \,ds - \int_\Omega \nabla v \cdot \nabla u \,dx = 0
\end{equation}
By introducing the auxiliary variable $\sigma = \nabla u$, Eq. \ref{eq:poisson-weakform} is extended to the following system:
\begin{align}
\int_{\partial\Omega} v (\sigma \cdot \underline{n}) \,ds - \int_\Omega \nabla v \cdot \sigma \,dx = 0 \\
\sigma = \nabla u
\end{align}
Employing such an approach enables splitting PDE systems of arbitrary order, resulting in larger systems of first-order PDEs.
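For instance, applying the same recursion to the fourth-order biharmonic equation $\Delta^2 u = 0$, stated here purely for illustration, yields a system in which every unknown is differentiated at most once:
\begin{equation}
\underline{\sigma} = \nabla u, \qquad w = \nabla \cdot \underline{\sigma}, \qquad \underline{\tau} = \nabla w, \qquad \nabla \cdot \underline{\tau} = 0.
\end{equation}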
\subsection{Continuous Galerkin Finite Element Method}
The most straightforward step is to derive the Continuous Galerkin (CG) method from the DG method.
The former is oftentimes also referred to as the classic Finite Element method, being the original formulation used to solve problems in structural mechanics \cite{liuEightyYearsFinite2022}.
In this case, all degrees of freedom (DoFs) in the domain are global, in contrast to being local to each cell.
However, each basis function associated with a given degree of freedom has compact support and is thus only non-zero within the direct vicinity.
The resulting linear system hence remains sparse but has considerably fewer DoFs than an equivalent discretization produced by a DG method.
One may obtain a CG method starting from the DGM by strongly coupling the degrees of freedom at cell interfaces. In other words, the previously discontinuous approximation must be made continuous.
In terms of the weak form of a given problem, the numerical flux that has been introduced by partial integration has to vanish.
This step is exactly taken in deriving weak forms for the CG scheme.
The equivalent weak form of the advection equation given by Eq. \ref{eq:advection-weak-dgm} is then: Find $\alpha \in V$ such that
\begin{equation}\label{eq:advection-weak-cgm}
\int_\Omega v \cdot \partial_t \alpha \,dx - \int_\Omega \nabla v \cdot \underline{u} \alpha \,dx = 0 \qquad \forall v \in V
\end{equation}
By coupling coinciding DoFs, one may equivalently introduce shared DoFs between cells.
This results in comparison to the DGM in a smaller global system that is in turn more coupled, yielding more non-zero entries per row and column in the system matrices.
\begin{figure}[htbp!]
\centering
\captionsetup[subfigure]{justification=centering}
\begin{subfigure}[c]{0.49\textwidth}
\centering
\includegraphics[]{Figs/Theory/cg-2d.eps}
\caption{}
\vfill
\label{fig:cg-2d}
\end{subfigure}
\begin{subfigure}[c]{0.49\textwidth}
\centering
\includegraphics[]{Figs/Theory/dg-2d.eps}
\caption{}
\label{fig:dg-2d}
\end{subfigure}
\caption{Coupling of global DoFs in the Continuous Galerkin (a) versus Discontinuous Galerkin FEM (b), both of first order. In the latter case, DoFs are entirely local to the cell and thus receive no contribution from neighboring cells. Weak coupling is only introduced by the additional numerical flux. Coupled DoFs are drawn in identical colors.}
\label{fig:cg-vs-dg-2d}
\end{figure}
The condensation of such a system by coupling DoFs is illustrated in Figure \ref{fig:cg-vs-dg-2d}.
From the numbering of DoFs in both figures, it becomes apparent that the number of additional allocations grows drastically with increasing dimensionality of the problem.
As the numerical flux is zero by definition for a CG scheme, we may also omit it from computation.
Thus, the CGM is noticeably less arithmetically intensive in this regard.
However, this computational saving is offset by the strong coupling of DoFs, resulting in a more dense linear system and possibly a more complex assembly process in terms of memory management.
As equivalence can be shown here based on the weak form and thus early on in the model assembly process, the choice of Finite Element is unaffected.
This in consequence also applies to the chosen type of triangulation or the order of approximation.
\subsection{Finite Difference Method}\label{sec:fdm}
At first glance, the Finite Difference Method (FDM) might appear to be conceptually different from the Finite Element methods given above. Instead of treating the discretized problem in an element-wise manner, the FDM operates on discrete points directly and per se lacks a notion of cells in the domain.
Yet, both methods may still yield identical discretizations.
A comparison of both approaches is shown in Figure \ref{fig:fd-vs-cg-2d}.
\begin{figure}[htbp!]
\centering
\captionsetup[subfigure]{justification=centering}
\begin{subfigure}[t]{0.49\textwidth}
\centering
\includegraphics[]{Figs/Theory/fd-stencil-2d.eps}
\caption{}
\label{fig:fd-stencil-2d}
\end{subfigure}
\hfill
\begin{subfigure}[t]{0.49\textwidth}
\centering
\includegraphics[]{Figs/Theory/cg-2d-collocation.eps}
\caption{}
\label{fig:cg-2d-collocatoin}
\end{subfigure}
\caption{Comparison of the nodal nature of the FDM (a) versus the cell-wise assembly used in the CG FEM (b) for an identical, cartesian triangulation with 9 nodes. Both methods are formulated as first-order approximations.
Red-colored nodes signify the points where the PDE is evaluated. Contributions to this node are taken from blue nodes, whereas white nodes provide no contribution.}
\label{fig:fd-vs-cg-2d}
\end{figure}
In the figures, $i$ and $j$ denote vertical and horizontal indices of grid nodes, $\phi_i$ are the FE basis functions and Roman letters denote indices of cells.
Forming, for example, the Laplacian for an FDM requires access to the vicinity of vertex $i,j$ (red node) in all cartesian directions (blue-colored nodes). For both methods, gray-marked nodes do not contribute to the value of the central node. For the special case of an FEM with quadrilateral elements, the same nodes form contributions to the global basis function $\phi_5$. However, we now do not evaluate the Laplacian operator directly but instead gather contributions from the weak form integrals. In the case of $\phi_5$, we have to gather contributions from cells $I$ to $IV$.
However, one may still show the equivalence of CGM and FDM by investigating the resulting global linear system.
We exemplify this claim using the Laplacian as a differential operator and the second-order central stencil.
This formulation continues to be widely used as an approximation technique.
As will be shown later, this particular choice of operator is especially straightforward to compare with the CG-FEM due to the choice of trial and test functions.
On a cartesian, two-dimensional grid with uniform spacing $h$ in both directions, the approximation reads
\begin{equation}\label{eq:fd-laplacian}
\Delta u \approx \frac{u_{i-1,j}+u_{i,j-1}-4u_{i,j}+u_{i+1,j}+u_{i,j+1}}{h^2}.
\end{equation}
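In the compact stencil notation commonly used for Finite Difference schemes, this approximation corresponds to the well-known five-point stencil
\begin{equation}
\frac{1}{h^2}
\begin{bmatrix}
0 & 1 & 0 \\
1 & -4 & 1 \\
0 & 1 & 0
\end{bmatrix}.
\end{equation}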
Assembled over all interior nodes, this stencil produces a global matrix with main diagonal entries $-4$ and four off-diagonals with entries $1$, scaled by the common factor $1/h^2$.
We now proceed to construct an equivalent CG Finite Element scheme, where the global system matrix is required to be exactly equivalent to the FD formulation.
The weak (CG) Laplacian can be formulated as: Find $u \in V$ such that for all $v \in V$
\begin{equation}
\int_\Omega v \Delta u \,dx = \underbrace{\int_{\partial\Omega} v (\nabla u \cdot \underline{n}) \,ds}_{=0} - \int_\Omega \nabla v \cdot \nabla u \,dx
\end{equation}
We have in this case introduced the additional restriction that trial and test space be identical, that is, we use a Bubnov Galerkin method.
Now, let $\Omega$ be an identical triangulation to the FD variant using quadrilateral $\mathbb{Q}^1$ elements, that is linear Lagrange elements.
Then, the four basis functions spanning the reference element are:
\begin{align}
\phi_1(x,y) &= xy - x - y + 1 &&\\
\phi_2(x,y) &= x (1 - y) &&\\
\phi_3(x,y) &= y (1 - x) &&\\
\phi_4(x,y) &= x y &&
\end{align}
The Finite Difference stencil given by Eq. \ref{eq:fd-laplacian} only takes into account contributions from nodes that lie strictly horizontally or vertically from the node of interest.
As a consequence, the node on the reference quadrilateral that is positioned diagonally from the center node must not have any contribution to the weak form integral, otherwise the resulting linear system cannot be equal.
We thus need to evaluate the weak form integrals in a way such that the resulting element matrices become sparse.
It turns out that this can be achieved by choosing a collocation method for quadrature.
In that case, the quadrature points are chosen to coincide with the node coordinates and, as a consequence, the mass matrix $\int_\Omega \phi_i \phi_j \,dx$ becomes diagonal.
From the family of Gaussian quadrature schemes, one can achieve this using a Gauss-Lobatto quadrature of order equal to the polynomial order of the Finite Element.
We note at this point that choosing this particular combination of quadrature method and number of nodes will lead to inexact integration and thus, a numerical error is introduced.
In this case, to produce a collocated scheme, one must pick the second-order Gauss-Lobatto variant using two quadrature nodes per coordinate direction.
As this type of integration is known to be exact for polynomials up to degree $2n-3$, this scheme will only integrate linear polynomials exactly.
However, the above-listed basis functions are bilinear and thus of combined polynomial degree $2$.
Integration will therefore not be exact in this case.
However, the modern FEM in general does not prescribe any particular method of integrating the weak formulation per se \cite{ciarletFiniteElementMethod2002}.
Thus, although the results of a properly implemented FEM in the sense of exact integration will be slightly more accurate, one can still show equivalence regarding a particular instance of the FEM.
We now evaluate the element-wise stiffness matrix $-\int_{\Omega^{(e)}} \nabla_k \phi_i \nabla_k \phi_j \,dx$ within the reference domain $\left[0;1\right]\times\left[0;1\right]$ for the given first order Lagrange element using Gauss-Lobatto quadrature, more specifically the variant using two quadrature points per coordinate direction.
This results in:
\begin{equation}
\underline{\underline{K}}^{(e)} = \begin{bmatrix}
-1 & 1/2 & 1/2 & 0 \\
1/2 & -1 & 0 & 1/2 \\
1/2 & 0 & -1 & 1/2 \\
0 & 1/2 & 1/2 & -1
\end{bmatrix}
\end{equation}
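As a side note, this element matrix can be verified with a few lines of code. The following sketch, given here only as an illustration and not as part of the benchmark implementations presented later, evaluates $-\sum_q w_q \nabla\phi_i(x_q)\cdot\nabla\phi_j(x_q)$ at the four Gauss-Lobatto points of the reference square using NumPy:
\begin{minted}{python}
import numpy as np

# Gradients of the four bilinear Lagrange basis functions listed above,
# evaluated on the reference square [0, 1] x [0, 1]
def basis_gradients(x, y):
    return np.array([
        [y - 1.0, x - 1.0],   # grad phi_1
        [1.0 - y, -x],        # grad phi_2
        [-y, 1.0 - x],        # grad phi_3
        [y, x],               # grad phi_4
    ])

# Two-point Gauss-Lobatto rule per direction: the quadrature points coincide
# with the element corners, each carrying the weight 1/4 on the unit square
points = [(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)]
weights = [0.25, 0.25, 0.25, 0.25]

K = np.zeros((4, 4))
for (x, y), w in zip(points, weights):
    G = basis_gradients(x, y)
    K -= w * (G @ G.T)        # K_ij = -sum_q w_q grad(phi_i) . grad(phi_j)

print(K)                      # reproduces the element matrix given above
\end{minted}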
As such, $\underline{\underline{K}}^{(e)}$ does not yet equal Eq. \ref{eq:fd-laplacian}.
The final step consists of assembling the linear system in the physical domain using the reference stiffness matrix.
In a cartesian mesh in two dimensions, an interior node is owned by four quadrilateral elements.
If one carries out this assembly process, an equivalent formulation is obtained:
\begin{equation}
\underline{\underline{K}} = \begin{bmatrix}
\ddots & \ddots & & & \vdots & \vdots & \vdots & & & & \\
\ddots & \ddots & \ddots & & \vdots & 1 & \vdots & & & & \\
& \ddots & \ddots & \ddots & \vdots & \vdots & \vdots & & & & \\
& & \ddots & \ddots & \ddots & 1 & \vdots & & & & \\
\cdots & \cdots & \cdots & \ddots & \ddots & \ddots & \vdots & \cdots & \cdots & \cdots & \cdots \\
\cdots & 1 & \cdots & 1 & \ddots & -4 & \ddots & 1 & \cdots & 1 & \cdots \\
\cdots & \cdots & \cdots & \cdots & \vdots & \ddots & \ddots & \ddots & \cdots & \cdots & \cdots \\
& & & & \vdots & 1 & \ddots & \ddots & \ddots & & \\
& & & & \vdots & \vdots & \vdots & \ddots & \ddots & \ddots & \\
& & & & \vdots & 1 & \vdots & & \ddots & \ddots & \ddots \\
& & & & \vdots & \vdots & \vdots & & & \ddots & \ddots
\end{bmatrix}
\end{equation}
The exact positions of the entries equal to one in this typically large and sparse matrix depend on the mesh topology as well as on the global numbering of the degrees of freedom.
For the discretization of other operators, a similar argument holds, as the shown procedure is irrespective of the choice of weak form or basis function.
For example, one could discretize the gradient of a function $\nabla u$ using an upwind Finite Difference formulation in fluid mechanics for resolving convective terms.
An equivalent Finite Element method can be assembled by producing a weak form as given in the above example, choosing the same collocation method and carrying out the integration numerically.
However, one important difference is that one cannot choose the test space to be equivalent to the trial space.
This would yield a symmetric system that does not correspond to an upwind Finite Difference formulation and is also not stable in the case of solving a pure advection equation.
One must instead choose a test space with asymmetric test functions to account for the notion of an upwind node, thus yielding a Petrov Galerkin scheme \cite{brooksStreamlineUpwindPetrovGalerkin1982}.
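For illustration, assuming a positive velocity component $u_x$, the classical first-order upwind difference for the convective term at node $i$ reads
\begin{equation}
u_x \, \partial_x \alpha \big|_{x_i} \approx u_x \, \frac{\alpha_i - \alpha_{i-1}}{h},
\end{equation}
drawing information only from the node that lies upstream of the flow, which is precisely the asymmetry that the Petrov Galerkin test functions reproduce.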
We can, as a result, summarise the FDM as a special instance of the CG FEM.
On the one hand, integration is restricted to a collocation method; on the other hand, the Jacobian mapping from reference to physical elements is constant throughout the domain.
This close relationship has also been hinted at by the analysis of boundary value problems by Thomée \cite{thomeeFiniteDifferenceFinite1984}.
When the goal is merely to achieve this particular discretization, the appeal of using Finite Differences over Finite Elements becomes apparent from the discussion above.
Most strikingly, the process of producing a local stencil is vastly more straightforward than performing element-wise assembly and gathering the weak form integrals in a global, sparse linear system.
Each element-wise operation in assembly would otherwise require the evaluation of the mesh Jacobian for the requested element, that is, the mapping from the reference to the physical space.
Furthermore, this constant stencil enables Finite Difference schemes to operate in a matrix-free manner easily.
For larger systems, this can help to avoid a large amount of allocated memory, thus being suited well for modern hardware architectures that are typically memory-bound.
These advantages are however offset by some topological restrictions on the mesh.
The simplicity of a constant stencil also implies that the mesh must not deviate from a cartesian geometry.
Otherwise, additional complexity is introduced since Eq. \ref{eq:fd-laplacian} becomes a stencil in the reference domain that has to be mapped to the physical domain.
This would still save the computational effort of assembling the weak form.
However, since this process only has to be carried out once for the reference element, the computational impact can be held low by pre-computing the integrand.
\subsection{Finite Volume Method}\label{sec:fvm}
In a similar vein to the FDM, the use of Finite Volumes might appear distinctly different from the idea of Finite Elements.
Here, we make extensive use of Stokes' theorem to replace volume with hull integrals in conservation laws \cite{eymardFiniteVolumeMethods2000}.
There exist different formulations of this method, most notably a cell-centered and a vertex-centered form.
The main difference lies in where the solution is stored.
In the former case, the solution is stored at the polygonal cell centers that are spanned by the mesh vertices.
The latter, instead, directly uses these vertices as solution points \cite{moukalledFiniteVolumeMethod2016}.
In this case, one does not operate on the computational mesh directly but rather on its dual.
As the cell-centered formulation is considerably more widely used, we investigate this variant further in the following.
It can be shown however that the FVM can simply be considered a Bubnov Discontinuous Galerkin method of polynomial order zero.
To illustrate this, we again turn to Eqs. \ref{eq:advection-strong} and \ref{eq:advection-weak-dgm} describing the strong and weak form of the advection equation.
A Finite Volume approximation in conservation form is:
\begin{equation}\label{eq:advection-fvm}
\int_\Omega \partial_t \alpha \,dx + \int_\Omega \underline{u} \cdot \nabla \alpha \,dx = \int_\Omega \partial_t \alpha \,dx + \int_{\partial\Omega} \alpha (\underline{u}\cdot\underline{n}) \,ds
\end{equation}
Apart from the presence of a test function $v$ in Eq. \ref{eq:advection-weak-dgm}, the second integrand simply represents the net flux of the conserved quantity $\underline{u}\alpha$ over the set of element boundaries.
For Eqs. \ref{eq:advection-weak-dgm} and \ref{eq:advection-fvm} to be equivalent in this case, the third integrand resulting from partial integration has to vanish in addition.
However, this can be shown trivially by setting the order of the polynomial space for the trial and test function to zero.
Then, the derivative of the test function vanishes and thus the entire term does not contribute to the weak form.
After performing this step, the test function is still present in the remaining parts of Eq. \ref{eq:advection-weak-dgm}.
For the remaining terms to be equivalent, the test function must drop out of them as well.
This can be accomplished straightforwardly by fixing the value of the test function to be unity.
In the weak form, this step is admissible since the equation must hold for every test function in $V$.
As $1 \in V^0$ where $V^0$ is the space of constant polynomials, this statement holds in particular for a Bubnov Galerkin scheme, as trial and test space must be identical.
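Carrying out both steps for a single cell $K$ of the triangulation, with volume $|K|$ and cell average $\bar{\alpha}_K$, yields the familiar semi-discrete Finite Volume update
\begin{equation}
|K| \, \frac{d \bar{\alpha}_K}{d t} + \sum_{F \subset \partial K} \int_F \hat{\alpha}_F \, (\underline{u} \cdot \underline{n}) \,ds = 0,
\end{equation}
where $\hat{\alpha}_F$ denotes the value of $\alpha$ reconstructed at the facet $F$, for instance by upwinding.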
The qualitative similarity of both schemes is illustrated in Figure \ref{fig:fv-vs-dg-2d}.
\begin{figure}[htbp!]
\centering
\captionsetup[subfigure]{justification=centering}
\begin{subfigure}[c]{0.49\textwidth}
\centering
\includegraphics[]{Figs/Theory/fv-2d.eps}
\caption{}
\label{fig:fv-2d}
\end{subfigure}
\hfill
\begin{subfigure}[c]{0.49\textwidth}
\centering
\includegraphics[]{Figs/Theory/dg-2d.eps}
\caption{}
\label{fig:dg-2d_2}
\end{subfigure}
\caption{Comparison of FVM (a) versus DGM (b) on an identical quadrilateral triangulation.
Coupled DoFs are marked in identical colors.
For the FVM, one must first reconstruct the values of the DoFs at the mesh facets to then compute the numerical flux.}
\label{fig:fv-vs-dg-2d}
\end{figure}
For both schemes, DoFs are entirely local to the cell and coupling happens through the calculation of a numerical flux - or in more formal terms, through the evaluation of the hull integral in the corresponding weak form.
However, the FVM only stores one DoF per cell, which has notable implications for the calculation of the numerical flux.
This means that as a first step, the cell values have to be reconstructed at the mesh facets. These reconstructed DoFs, which depend on the cell values they interpolate between, can then be coupled to their counterparts at opposing mesh facets. These relationships are denoted by DoFs being colored identically (coupled) and being transparent (reconstructed) in Figure \ref{fig:fv-2d}.
For a DGM scheme of first order or higher, this interpolation step oftentimes is not necessary if the quadrature scheme is chosen carefully.
For a collocation method (see section \ref{sec:fdm} for a more thorough discussion), one does not need to tabulate the full list of DoF values at the set of facet quadrature points, but rather only a small subset of DoFs that are owned by the facet \cite{hesthavenNodalDiscontinuousGalerkin2008}.
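To make the Finite Volume update concrete, the following minimal Python sketch implements a cell-centered update for the one-dimensional advection equation, assuming a constant, positive velocity $a$ and a periodic mesh; all names and parameters are illustrative and not tied to any particular library.
\begin{verbatim}
import numpy as np

# First-order upwind finite-volume update for d(alpha)/dt + a d(alpha)/dx = 0
# on a periodic 1D mesh. Each cell carries a single, constant DoF (its cell
# average); the facet value is "reconstructed" by taking the upwind average.
def fv_upwind_step(alpha, a, dx, dt):
    flux = a * alpha                  # numerical flux at the right facet (a > 0)
    # hull integral per cell: outflow through the right facet minus inflow
    # through the left facet (np.roll supplies the periodic left neighbor)
    return alpha - dt / dx * (flux - np.roll(flux, 1))

# usage sketch: advect a step profile once around the periodic unit domain
n, a = 200, 1.0
dx = 1.0 / n
x = np.linspace(0.0, 1.0, n, endpoint=False) + 0.5 * dx    # cell centers
alpha = np.where((x > 0.25) & (x < 0.5), 1.0, 0.0)
dt = 0.5 * dx / a                                          # CFL number 0.5
for _ in range(int(round(1.0 / (a * dt)))):
    alpha = fv_upwind_step(alpha, a, dx, dt)
\end{verbatim}
The per-cell update is exactly the order-zero DG update with a unit test function: no volume term from the advection operator remains, and only the facet fluxes contribute.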
In summary, the FVM can again be considered as a special instance of the Bubnov type DGM, where the shared polynomial space $V$ is taken to be of constant order and the test function $v$ is set to be unity.
Similarly to the discussion of the FDM, we note that this simplification of Eq. \ref{eq:advection-weak-dgm} brings with it computational advantages that are gained at the cost of some flexibility.
The absence of a true weak form in a Finite Volume formulation again means that actual assembly is not needed.
In addition, one may omit the transformation from the reference to physical space, as interpolating degrees of freedom to mesh facets and forming a finite sum of these contributions can be done on the mesh directly.
The caveat of this approach is that FVM in principle is bound to be at most first-order accurate.
In practice, this restriction can be lifted by applying higher-order flux reconstruction techniques \cite{zhouNumericalComparisonWENO2001,shuHighorderFiniteDifference2003}.
Such techniques, however, quickly become computationally expensive with increasing order, since the higher order is achieved by widening the reconstruction stencil, which increases memory and time complexity considerably \cite{liuRobustReconstructionUnstructured2013}.
\subsection{Summary}
In this section, we have established a common framework to formulate the most prevalent grid-based numerical schemes for the solution of PDEs.
It turns out that the DG Method possesses enough flexibility to incorporate the CGM, FDM and FVM by imposing a set of restrictions.
A summary of the results presented in this section on how the schemes compare overall is given in Table \ref{tab:schemes-comparison}.
\begin{table}
\centering
\caption{Comparison of the individual restrictions that the presented schemes impose. Certain simplifications bring with them computational advantages, as discussed above.}
\label{tab:schemes-comparison}
\resizebox{\linewidth}{!}{%
\begin{tblr}{
hline{1,6} = {-}{0.08em},
hline{2} = {-}{0.05em},
}
\textbf{Scheme} & \textbf{Geometry} & \textbf{Function Space} & \textbf{Weak Form} & \textbf{Quadrature}\\
DGM & Arbitrary & Discontinuous ($L^2$) & Full & Arbitrary\\
CGM & Arbitrary & Continuous ($H^1$) & {No hull integrals \\over interior facets} & Arbitrary\\
FDM & Cartesian & Continuous ($H^1$) & {No hull integrals \\over interior facets} & Collocation\\
FVM & Arbitrary & {Discontinuous ($\mathbb{P}^0 \subset L^2$),\\Bubnov Galerkin} & No volume integrals & None required
\end{tblr}
}
\end{table}
This framework is not only of theoretical use.
Rather, such a common formulation also enables us to combine these schemes arbitrarily to solve larger problems.
As each scheme possesses its own strengths and means to gain computational efficiency, this is an important result: it enables efficient mixed discretizations of multiphysics problems.
Establishing a practical method to achieve exactly this will be the content of the next section.
Before concluding the discussion on relating the above numerical schemes, we add an important remark.
There do exist several extensions to these methods that in general do not fit into the framework that has been established.
We will list a few examples for the sake of illustration.
For instance, there exist formulations of the FDM that can capture domains with less regularity, see for example \cite{fornbergGenerationFiniteDifference1988,visbalUseHigherOrderFiniteDifference2002,zhangThreedimensionalElasticWave2012,perroneGeneralFiniteDifference1975}.
One can also find alternative FDM-based discretization methods in the literature that encompass the notion of missing grid structure more naturally, such as interpreting vertices as centroids of Voronoi cells \cite{sukumarVoronoiCellFinite2003}.
As mentioned previously, there exist various formulations of the FVM that extend far beyond the original restriction of first-order accuracy.
The cell-averaged flux is then determined in terms of reconstruction polynomials that in theory can be of arbitrary order.
Such approaches per se do not fit well into the DG framework given above but do achieve similar results.
\section{Method for Assembling Numerical Schemes}
The overarching goal of this section is to identify a suitable combination of numerical schemes for a given multiphysics problem that is stable and accurate on the one hand, but also performant with regards to a specific choice of hardware on the other hand.
With the relations between methods established in section \ref{sec:baseline}, we can now exploit the simplifications, and thus computational advantages, that each scheme presents.
That is, we follow the guideline to impose as many restrictions as possible whilst sustaining enough degrees of freedom to accurately capture the behavior of a given PDE.
In this way, we aim to provide the application expert with a recommendation on which schemes to use for a particular computational problem.
Our key concern is, above all, to make this recommendation unambiguous.
In addition, we focus on recommendations that produce a stable solution and do not require the tuning of artificial parameters.
If either requirement were not met, the usefulness of this method would be lost, as one would have to undergo substantial experimentation to attain a valid solution.
Thus, by proposing such a method, we aim to strike a sensible tradeoff between practicality and reproducibility on the one hand and utmost performance, at the cost of possibly many model-building iterations and tuning, on the other hand.
\subsection{Preliminary Assumptions}\label{sec:restrictions}
As a starting point, it has to be stated that encompassing the entire state of research on such schemes would be an impossible task.
Formalizing a common framework at that level of generality would be equally challenging and is thus not considered in this work.
Instead, we introduce restrictions that are, on the one hand, strong enough to construct a unifying scheme but, on the other hand, not so strict that the efficient solution of real-world problems falls out of scope.
Thus, we propose the following restrictions to arrive at an unambiguous choice of numerical schemes:
\begin{enumerate}
\item Only Bubnov Galerkin schemes are considered, that is, we omit Petrov Galerkin methods.
The former restricts the choice of test space to be identical to the trial space.
As such, we omit schemes that, for instance, use weighted test functions or stencils to account for flow fields.
An example of such schemes would be the Streamline Upwind Petrov Galerkin (SUPG) method \cite{brooksStreamlineUpwindPetrovGalerkin1982}.
This restriction is essential to obtain an unambiguous choice of method, as the notion of Petrov Galerkin methods does not imply any particular choice of function space.
\item We omit function spaces for approximation other than the $L^2$ and $H^1$ Sobolev spaces.
There exists a vast variety of so-called Mixed Finite Element schemes that use Finite Elements based on different or composite function spaces with unique properties \cite{ernTheoryPracticeFinite2004}.
For example, one may construct function spaces that can exactly fulfill divergence-free properties ($H(\text{div})$) or conditions based on the rotation of a field ($H(\text{curl})$).
The specific choice of Finite Element then would require a considerable amount of expertise and would warrant a complex decision process of its own.
We thus focus on scalar-, vector- and tensor-valued Lagrange elements solely.
They have been shown to encompass a similar solution space as well and perform comparably for fluid and electromagnetic problems \cite{cockburnLocallyDivergencefreeDiscontinuous2004,hughesStabilizedMixedDiscontinuous2006}.
\item Closely related to the previous statement, we restrict the solution space further by requiring that only Finite Elements utilizing Lagrange polynomials should be used.
As the standard scalar- and vector-valued $\mathbb{P}^k$ and $\mathbb{Q}^k$ Finite Elements, being by far the most popular choices, use exactly this family of polynomials, this requirement is weaker in practice than it might seem at first glance \cite{lagrange-0,lagrange-1}.
\item We impose a coarse taxonomy to classify the qualitative behavior of a given PDE, that is, we specify limits regarding the leading coefficients of the differential operators.
This should indicate whether the physical process described by the PDE is either more dissipative or more convective by nature.
We thus introduce a more physical interpretation than the considerably stricter coercivity measures employed in functional analysis.
Our taxonomy closely follows the classes that were proposed by Bitsadze \cite{bitsadze1988some}.
We do not claim this classification to be universally accurate.
In practice, it has been shown however that having discrete cut-off values to disambiguate classes of PDE eases the choice of numerical scheme for application experts considerably.
Hence, we choose to follow this path despite some shortcomings regarding generality.
\item We only investigate systems of PDEs with differential operators up to second order.
These are most common within physical processes and enable a wider range of numerical schemes to be used.
For instance, equations of higher order, such as the Cahn-Hilliard equation, would require the use of Finite Elements for which up to third-order derivatives are defined.
Such elements of high continuity are cumbersome to derive and are rarely used.
Instead, we propose that in such cases the system be reformulated as a mixed problem; in the mentioned example, the quantity of interest could be represented by two coupled fields with derivatives of at most second order each.
This technique is also regularly used in practice.
\item For Finite Volume methods, we use the cell-centered variant as already outlined in section \ref{sec:fvm} instead of the vertex-centered or cell-vertex formulation.
This is because the cell-centered form is the most popular choice in practice.
Furthermore, it relates to the other schemes more naturally, as has been shown in the previous sections.
\end{enumerate}
\subsection{PDE Classification}\label{sec:pde-classification}
To find a numerical scheme that produces stable results, knowing the qualitative behavior of the system oftentimes is a necessity.
In particular, this means that the specific capabilities that a chosen numerical scheme possesses need to reflect the properties that the system presents.
We illustrate this by example. We once again investigate the simple advection equation (Eq. \ref{eq:advection-strong}), which is known to be first-order hyperbolic.
Such systems are prone to preserving or even amplifying discontinuities present in the initial condition, and thus the capability of accurately representing these should be incorporated into the choice of numerical scheme.
Suitable candidates would then be a Finite Volume or Discontinuous Galerkin Method.
In contrast, the Finite Difference Method using a centered stencil or the Continuous Galerkin Method would give suboptimal results: the strong imposition of continuity in the domain would yield spurious oscillations that affect stability.
We hence require the system of PDEs to be classified beforehand. We follow the popular taxonomy of second-order PDEs that can, for example, be found in the book by Bitsadze \cite{bitsadze1988some}, but follow a more general method for determining the appropriate class \cite{pinchoverIntroductionPartialDifferential2005}.
That is, we define a single governing equation in the form of a PDE to be either elliptic, parabolic or hyperbolic, depending on the shape that its characteristic quadric takes in space.
More formally, given a differential operator $L$ of the form:
\begin{equation}\label{eq:pde-classification}
L[u] = \sum\limits_{i,j=1}^{n} a_{ij} \frac{\partial^2 u}{\partial x_i \partial x_j}
\end{equation}
where $x_i$ are the independent variables and $a_{ij}$ is the matrix of coefficients of the highest-order spatial derivatives.
Considering the eigenvalues $\lambda_i$ of $a_{ij}$, $L$ is called
\begin{itemize}
\item elliptic, if all $\lambda_i$ are either positive or negative,
\item parabolic, if at least one eigenvalue is zero and all others are either positive or negative,
\item hyperbolic, if at least one eigenvalue is positive and at least one is negative.
\end{itemize}
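As an illustration of this criterion, the following minimal Python sketch classifies the principal part of a second-order operator from the eigenvalues of its coefficient matrix $a_{ij}$; the function name, the tolerance and the treatment of degenerate sign patterns are illustrative assumptions.
\begin{verbatim}
import numpy as np

def classify_second_order(a, tol=1e-12):
    # Classify the principal part sum_ij a_ij d^2u/(dx_i dx_j) via the
    # eigenvalues of the (symmetrized) coefficient matrix a_ij.
    eig = np.linalg.eigvalsh(0.5 * (a + a.T))   # mixed derivatives commute
    pos = int(np.sum(eig > tol))
    neg = int(np.sum(eig < -tol))
    zero = len(eig) - pos - neg
    if pos > 0 and neg > 0:
        return "hyperbolic"
    if zero > 0:
        return "parabolic"
    return "elliptic"

# prototypical principal parts, written in the variables (x, y) or (t, x):
print(classify_second_order(np.eye(2)))              # Laplace equation -> elliptic
print(classify_second_order(np.diag([0.0, -1.0])))   # heat equation    -> parabolic
print(classify_second_order(np.diag([1.0, -4.0])))   # wave eq., c = 2  -> hyperbolic
\end{verbatim}
Sign patterns that combine zero eigenvalues with eigenvalues of both signs fall outside the three classes above and are reported as hyperbolic by this simple sketch.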
The characterization of first-order differential operators is more straightforward, however.
It can be shown that first-order PDEs with constant, real coefficients are always hyperbolic.
This condition is met for most cases relevant to engineering or physical applications.
More precisely, a first-order PDE is hyperbolic if the resulting Cauchy problem is uniquely solvable.
In the case of real, constant coefficients, the characteristic polynomial equation in each variable has to admit $n$ solutions for an equation of order $n$ while all other variables are kept constant.
In the present case, this is trivially true.
We apply this classification for each governing equation of the independent variables for a given multiphysics problem.
In practice, one may oftentimes identify the class by the differential operators that frequently appear in a given PDE.
For example, a PDE whose only spatial differential operator is the Laplacian, such as the Laplace equation $\Delta u = 0$ or the heat equation $\partial_t u - \Delta u = 0$, exhibits dissipative behavior and is prototypical for elliptic and parabolic PDEs, respectively.
Oftentimes, one can identify a differential operator as parabolic if it combines an elliptic operator in its spatial derivatives with an additional temporal derivative, as is exactly the case for the heat equation.
Both of the abovementioned classes of PDE are dissipative, the reason being that PDEs of second order can only have discontinuous derivatives along their characteristics.
Since elliptic differential operators possess no real characteristics, they admit only smooth solutions in that sense \cite{pinchoverIntroductionPartialDifferential2005}.
Thus, we associate this qualitatively dissipative behavior with elliptic and parabolic PDEs as defined above.
In contrast, the advection equation (Eq. \ref{eq:advection-strong}) only has the gradient as a spatial differential operator, representing purely convective behavior.
Exactly this behavior of transporting information through the domain with finite speed is associated with the wave-like character of hyperbolic equations.
Figure \ref{fig:pde-classification} gives an overview of the classes of PDEs considered.
\begin{figure}[htbp!]
\centering
\resizebox{\linewidth}{!}{%
\begin{tikzpicture}[node distance = 2cm]
\node (toplevel) [element] {PDEs of (up to) second order};
\node (continuous) [element, below of=toplevel, xshift=-4cm] {Diffusive / Continuous};
\node (discontinuous) [element, below of=toplevel, xshift=4cm] {Convective / Discontinuous};
\node (elliptic) [element, below of=continuous, xshift=-2cm] {Elliptic};
\node (parabolic) [element, below of=continuous, xshift=2cm] {Parabolic};
\node (1hyperbolic) [element, below of=discontinuous, xshift=-2cm] {First order hyperbolic};
\node (2hyperbolic) [element, below of=discontinuous, xshift=2cm] {Second order hyperbolic};
\node (character) [description,left of=continuous, xshift=-3.5cm] {Character of the PDE / solution};
\node (type) [description,below of=character] {Class of the PDE};
\draw[arrow] (toplevel) -| (continuous);
\draw[arrow] (toplevel) -| (discontinuous);
\draw[arrow] (continuous) -| (elliptic);
\draw[arrow] (continuous) -| (parabolic);
\draw[arrow] (discontinuous) -| (1hyperbolic);
\draw[arrow] (discontinuous) -| (2hyperbolic);
\begin{pgfonlayer}{background}
\node [fill=orange!30,fit=(character) (discontinuous)] {};
\node [fill=blue!30,fit=(type) (2hyperbolic)] {};
\end{pgfonlayer}
\end{tikzpicture}
}%end resizebox
\caption{Classification of PDEs up to second order by qualitative character and type, following \cite{bitsadze1988some}.}
\label{fig:pde-classification}
\end{figure}
In alignment with the postulate at the beginning of this section, we aim to solve a given class of PDEs with as few degrees of freedom as possible whilst not over-constraining the solution.
Most importantly, discontinuities that might appear in the solution should be properly accounted for and be reflected in the choice of numerical scheme.
The direct consequence is that methods enforcing continuity should be used for problems that qualitatively exhibit high regularity and continuity.
From the previous discussion, it becomes apparent that this is the case for Finite Differences and Continuous Galerkin Finite Elements.
Problems that either preserve or even develop shocks, however, should be solved using methods that naturally allow for them.
This means that either Finite Volumes or Discontinuous Galerkin Finite Elements suit this requirement most naturally.
\subsection{Domain Geometry}\label{sec:domain-geometry}
As discussed in section \ref{sec:fdm}, the discretization using Finite Differences inherently assumes a regular grid with uniform spacing between nodal points.
The direct consequence of this simplification is that assembly can be done in the computational domain directly and in an identical manner for every nodal point.
In general, if the domain has a particularly simple shape, for example a hypercube, and does not contain any holes, it can be triangulated using a Cartesian grid.
Thus, if the discrete domain fulfills these conditions and the differential operators form an elliptic or parabolic PDE, using the FDM to efficiently assemble the global system is advisable.
For FVM, CGM and DGM, regularity of the computational domain, in general, does not pose any considerable advantages that may accelerate the assembly of the discretized system.
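To illustrate the advantage that the FDM draws from a Cartesian grid, the following minimal Python sketch applies the five-point Laplacian stencil in a matrix-free fashion on a uniform grid; the grid size, boundary treatment and names are illustrative assumptions. Every interior node is treated identically, and neither a global matrix nor a mapping to a reference element is required.
\begin{verbatim}
import numpy as np

def apply_laplacian(u, h):
    # Matrix-free application of the 5-point Laplacian stencil on a uniform
    # Cartesian grid with spacing h; boundary entries of the result are left
    # at zero (homogeneous Dirichlet data is assumed to be stored in u).
    lap = np.zeros_like(u)
    lap[1:-1, 1:-1] = (u[2:, 1:-1] + u[:-2, 1:-1] +
                       u[1:-1, 2:] + u[1:-1, :-2] -
                       4.0 * u[1:-1, 1:-1]) / h**2
    return lap

# usage sketch on the unit square
n = 65
h = 1.0 / (n - 1)
u = np.random.rand(n, n)
u[0, :] = u[-1, :] = u[:, 0] = u[:, -1] = 0.0   # homogeneous Dirichlet boundary
lap_u = apply_laplacian(u, h)
\end{verbatim}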
\subsection{PDE Linearity}\label{sec:pde-linearity}
Another crucial property of a given PDE to assess is its linearity.
In this case, we disambiguate strictly linear, semilinear, quasilinear and fully nonlinear equations, following the definition given in Evans \cite{evansPartialDifferentialEquations2010}:
A $k$-th order Partial Differential Equation of the form $$F(D^k u(x), D^{k-1} u(x),...,Du(x), u(x), x) = 0$$ is called:
\begin{enumerate}
\item linear, if it has the form
$$ \sum\limits_{\lvert \alpha \rvert \leq k} a_\alpha (x) D^\alpha u = f(x) $$
for given functions $a_\alpha (\lvert \alpha \rvert \leq k), f$. The PDE is homogeneous if $f \equiv 0$.
\item semilinear, if it has the form
$$ \sum\limits_{\lvert \alpha \rvert = k} a_\alpha (x) D^\alpha u + a_0 (D^{k-1}u,...,Du,u,x) = 0 $$
\item quasilinear, if it has the form
$$ \sum\limits_{\lvert \alpha \rvert = k} a_\alpha (D^{k-1}u,...,Du,u,x) D^\alpha u + a_0 (D^{k-1}u,...,Du,u,x) = 0 $$
\item fully nonlinear, if it depends nonlinearly upon the highest-order derivatives.
\end{enumerate}
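To make these definitions concrete, consider a few prototypical examples. The heat equation $\partial_t u - \Delta u = f$ is linear, since every term consists of a given coefficient multiplying a derivative of $u$. A reaction-diffusion equation with a cubic source term, such as the Allen-Cahn-type equation $\partial_t u - \Delta u + u^3 - u = 0$, is semilinear: the highest-order derivatives carry constant coefficients, while the nonlinearity is confined to the lower-order term $a_0$. The inviscid Burgers equation $\partial_t u + u \, \partial_x u = 0$ is quasilinear, since the coefficient of its highest-order derivative depends on $u$ itself.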
While linearity does not pose much of a problem for elliptic or parabolic equations, it plays an important role in whether a discretization is stable for hyperbolic equations.
The theory of nonlinear flux limiters is in general well researched for DG methods and largely profits from extensive developments that originally stem from the FVM.
However, accurate computation and implementation remain a hurdle in practice.
There have thus been several approaches to circumvent this issue, for example, by switching to an FV scheme in regions where there might be problems regarding the stability of the solution \cite{maltsevHybridDiscontinuousGalerkinfinite2023,sonntagShockCapturingDiscontinuous2014}.
As the overarching goal of this method is to provide straightforward guidance for end users, we will omit such approaches that must in most cases be implemented in a custom and rather particular fashion in favor of simplicity.
We thus recommend that, for equations where the solution is not likely to require many nonlinear iterations per time step, one may safely use a DG scheme.
In other cases where stability cannot be assured universally, one should rather switch to a Finite Volume formulation that may be overly diffusive, but on the upside is guaranteed to yield a stable solution.
\subsection{Computing Environment}\label{sec:hardware}
Within the last decade, the advancement of computer hardware has been known to slowly hit the so-called \textit{memory wall} \cite{mckeeMemoryWall2011}.
That is, applications tend to be bound by the capability of the hardware to transfer data rather than to perform arithmetic operations.
This holds in particular for numerical simulations that are performed using many workers or that involve large problems.
In such cases, the evaluation of sparse matrix-vector products poses high loads regarding memory bandwidth \cite{arndtExaDGHighOrderDiscontinuous2020}.
Then, there are numerical schemes that naturally lend themselves toward parallelism and others that are more memory-bound by design.
Thus, for a given computing hardware that puts enough emphasis on massive parallelism and two numerical schemes performing (nearly) identically, one should prefer the one that handles parallelism better.
We thus naturally arrive at the question of where one should draw the line between massively parallel and other, regular hardware.
There are essentially two factors that would affect such a classification.
First, the hardware architecture itself plays an important role.
We may, on the one hand, solve a PDE on the classic CPU architecture, which is capable of performing arithmetic at many precision levels and of using specialized instruction sets such as AVX or fused multiply-add (FMA).
Another possibility is the use of highly parallel computing units, such as general-purpose graphics processing units.
These, however, have a memory layout and instruction set that are tailored much more toward one purpose.
In the case of a GPU, this is medium- to low-precision operations with comparably low memory intensity but high arithmetic effort.
The other deciding factor is the number of workers involved in the simulation process.
The more workers there are, the more processor boundaries are present and thus the more information has to be shared between processors.
For some schemes, this overhead due to the exchange of memory between workers can become prohibitive.
Within the Finite Volume Method, for instance, parallel efficiency, as measured in GFlop/s, starts to drop notably in the regime of 50 to 100 workers \cite{marshallFinitevolumeIncompressibleNavier1997}.
The quantitative drop-off also depends on the specific implementation, since other authors report slightly different results.
Fringer et al. for instance note a decline in parallel efficiency for a Finite Volume solver starting at 32 workers \cite{fringerUnstructuredgridFinitevolumeNonhydrostatic2006}.
Thus, as a general guideline, we recommend employing methods that are suited for highly parallel environments at roughly 50 or more CPU workers.
For execution on massively parallel architectures, such as GPUs, the switch to such algorithms is considered necessary to obtain good efficiency.
\subsection{Problem Scale}\label{sec:problem-scale}
Another deciding factor for whether adaptivity is needed or not is the presence of multiple length scales in a multiphysics model.
We follow the definition given in \cite{eMultiscaleModeling2011,ePrinciplesMultiscaleModeling2011} and characterize a PDE-governed problem as having a multiscale nature if models of multiple spatial or temporal scales are used to describe a system.
Oftentimes, this is the case if equations are used that originate from different branches of physics, such as continuum mechanics versus quantum mechanics or statistical thermodynamics.
This may on the one hand be a physical process with slow and fast dynamics, for example, in chemical reaction networks.
Then, the multiscale nature shows itself in the time domain of the problem.
Another example is a problem commonly encountered in alloy design: the evolution of the temperature field and phase kinetics during heating and solidification.
In this case, various length scales can be involved, such as in processes involving laser heating.
The temperature gradients then involve resolutions at a scale of around \SI{1e-5}{\metre}, whereas the width of a solidification front rather goes down to a sub-micrometer scale, that is, around \SI{1e-7}{\metre} \cite{zimbrodModellingMicrostructuresInsitu2021}.
In terms of the previous definition, we have one model that is governed by the laws of macroscale thermodynamics (that is, the heat equation).
The other part of physics present is typically described by the evolution of a phase field.
The corresponding equations of this model are however derived from the formulation of a free energy functional from Landau theory \cite{landauTheoryPhaseTransitions1937}.
Due to the wide variety of physical processes and combinations thereof, formulating general criteria for the presence of a multiscale problem from a mathematical point of view is challenging.
To the knowledge of the authors, there do not exist any metrics in the literature that would enable such a classification.
We instead rely on the knowledge of the application expert who we assume to be familiar with the physics that should be captured.
For a rough disambiguation, however, one may use the definition given above.
Such multiscale phenomena are prohibitively expensive to resolve on a uniform mesh due to the nonnegligible difference in the dynamics of the system.
One option to efficiently resolve the physics at multiple scales is to employ different grids and solve the resulting problem in parallel.
This has, for example, been done for the above-mentioned case, specifically metal additive manufacturing \cite{ghanbariAdaptiveLocalglobalMultiscale2020}.
A rather effective, alternative approach is the modification of the governing equations such that they become tailored to a specific numerical scheme.
For instance, the well-known phase field model has been adapted using specialized stencils to the FDM such that spurious grid friction effects are eliminated \cite{fleckSharpPhasefieldModeling2023,fleckFrictionlessMotionDiffuse2022}.
This approach however requires extensive knowledge about the numerics as well as the physical nature of a given problem.
Another possibility that requires fewer adaptions of the code to the specific problem is to make use of grid adaptive algorithms.
This approach for the problem presented is a popular alternative and has been implemented multiple times \cite{proellHighlyEfficientComputational2023,olleakEnablingPartScaleScanwise2022,olleakPartscaleFiniteElement2020,olleakScanwiseAdaptiveRemeshing2020a}.
Thus, grid adaptivity plays a key role in creating solutions to such problems, if the domains are not to be resolved on different discretizations entirely.
Numerical methods consequently need to reflect this requirement; as such, Finite Difference methods are not suitable for these types of problems.
CG Finite Element methods do enable grid as well as polynomial degree adaptivity.
Yet, the imposition of hanging node constraints is oftentimes not trivial.
Though there have been considerable strides toward easy and intuitive handling of hanging nodes for continuous elements \cite{solinArbitrarylevelHangingNodes2008,bangerthDataStructuresRequirements2009}, these methods naturally fall short of the inherently decoupled nature of DoFs present in discontinuous methods.
Whereas grid adaptivity is easily realizable within the FVM, there is little room for adaptivity regarding the order of approximation, which can at best be achieved using varying reconstruction stencils \cite{shuHighorderFiniteDifference2003}.
By far, the most naturally suited method for h- as well as p-adaptivity is the DG FEM.
The locality of DoFs enables the splitting of cells without the need for hanging node constraints.
The same argument applies to altering the degree of a Finite Element, as additional DoFs within the cell need not be attached to a counterpart on its neighbors.
\subsection{Summary}
We may now condense the various aspects of choosing appropriate numerical schemes into a unifying method, given the restrictions we posed in section \ref{sec:restrictions}.
First, we take three sets of inputs that are of practical relevance: the mathematically formulated, continuous problem, the computational domain on which one wishes to solve it, and the configuration of the target hardware.
To design the intended decision process, we start by evaluating the decision metrics that impact the target scheme in the most general manner.
The general question of whether the prescribed system of PDEs requires an efficient solution on a large scale fulfills this requirement here.
By the term large scale, we understand state-of-the-art computing hardware on massively parallel architectures.
That decision in turn is influenced by two factors: One may directly intend to efficiently solve the system of PDEs on that hardware, or the multiscale nature of the problem demands such a computing environment.
If either is the case, solving the entire system using the HDG method is advisable due to the resulting locality of the problem.
The remaining parts of the decision process depend on the class of PDE present.
From here on, we operate in a field-wise manner and classify the system of PDEs for each independent variable separately.
If a PDE is convective in character, that is, hyperbolic, we recommend the use of numerical schemes that incorporate discontinuous approximations.
If, on the other hand, a problem is diffusive by nature, the solution will be continuous and thus the use of continuous approximations is more advisable.
In the case of the former, following the discussion in section \ref{sec:pde-linearity}, a final disambiguation must be made regarding linearity.
If the PDE is linear or semilinear, a DG scheme can be applied due to the unlikeliness of stability issues.
Otherwise, the use of a simple FV scheme is more advisable to obtain a stable solution without having to iterate through many different choices of flux limiters in a trial-and-error fashion.
Regarding the continuous schemes, as has been explained in sections \ref{sec:fdm} and \ref{sec:domain-geometry}, the configuration of the domain geometry plays an important role in the efficiency of the overall scheme.
If the domain is Cartesian, irrespective of dimensionality, the FDM can deliver accurate results with a considerably decreased amount of arithmetic operations.
The conceptual flexibility of the FEM regarding the domain is then unnecessary.
In the other case though where the domain is topologically more complex, relying on FEM algorithms that account for the necessary global mappings is more appropriate.
It would of course be possible to identify a middle ground between both schemes, for example, when a simple and prescribed transformation can be applied to the entire domain.
This would for example be the case for systems that can be described by polar coordinates.
However, few computer codes implement such functionality.
As the focus of this method lies on practicality and usefulness, we rather choose a method that can make use of widespread and established computer codes and thus omit these possibilities.
As a result, we obtain one process that guides the user through iteratively selecting the most appropriate combination of numerical schemes for a given, fixed and well-defined set of inputs.
This method may be summarised in a flow chart, which is depicted in Figure \ref{fig:decision-process}.
\begin{figure}[htbp!]
\centering
\resizebox{\linewidth}{!}{%
\begin{tikzpicture}[node distance = 2cm]
\node (start) [startstop] {Start};
\node (input-pde-system) [io, right of=start, xshift=2cm] {\textbf{I1}: System of PDEs};
\node (input-compute) [io, right of=input-pde-system, xshift=2cm] {\textbf{I2}: Hardware configuration};
\node (proc-compute) [subprocess, right of=input-compute, xshift=2cm] {\textbf{P1}: Classify hardware scale (\ref{sec:hardware})};
\node (proc-adaptivity) [subprocess, below of=proc-compute] {\textbf{P2}: Evaluate length scales (\ref{sec:problem-scale})};
\node (dec-compute) [decision, left of=proc-adaptivity,xshift=-5cm, yshift=-2cm] {\textbf{D1}: Massive parallelism required?};
\node (classify-pde) [subprocess, below of=dec-compute,yshift=-1cm] {\textbf{P3}: Classify PDE for current field (\ref{sec:pde-classification})};
\node (advance) [process, left of=classify-pde, xshift=-2cm] {Advance to next field};
\node (dec-class) [decision, below of=classify-pde] {\textbf{D2}: Classification};
\node (geometry) [io, below of=dec-class,xshift=-3cm] {\textbf{I3}: Domain Geometry};
\node (proc-linearity) [subprocess, right of=geometry, xshift=4cm] {\textbf{P4}: Evaluate PDE Linearity (\ref{sec:pde-linearity})};
\node (dec-hyperbolic) [decision, below of=proc-linearity] {\textbf{D4}: PDE Linearity};
\node (dec-geometry) [decision, below of=geometry] {\textbf{D3}: Domain Regularity};
\node (out-fdm) [res, below of=dec-geometry,xshift=2cm, yshift=-1cm] {Finite Difference};
\node (out-cgm) [res, below of=dec-geometry,xshift=-2cm, yshift=-1cm] {Continuous Galerkin};
\node (out-fvm) [res, below of=dec-hyperbolic, yshift=-1cm] {Finite\\Volume};
\node (out-dgm) [res, right of=out-fvm, xshift=2cm] {(Hybridizable) Discontinuous Galerkin};
\node (dec-done) [decision, below of=out-fdm,xshift=2cm,yshift=-1cm] {\textbf{D5}: All fields assigned?};
\node (stop) [startstop, below of=dec-done,yshift=-0.5cm] {Stop};
\draw[arrow] (start) -- (input-pde-system);
\draw[arrow] (input-pde-system) -- (input-compute);
\draw[arrow] (input-compute) -- (proc-compute);
\draw[arrow] (proc-compute) -- (proc-adaptivity);
\draw[arrow] (proc-adaptivity) -| (dec-compute);
\draw[dashed,arrow] (dec-compute) -| node[anchor=south,pos=.25] {yes} (out-dgm);
\draw[arrow] (dec-compute) -- node[anchor=west] {no} (classify-pde);
\draw[arrow] (classify-pde) -- (dec-class);
\draw[arrow] (dec-class) -| node[anchor=south,pos=.25, text width = 1.8cm] {parabolic or elliptic} (geometry);
\draw[arrow] (geometry) -- (dec-geometry);
\draw[arrow] (dec-geometry) -| node[anchor=east] {Irregular} (out-cgm);
\draw[arrow] (dec-geometry) -| node[anchor=west] {Cartesian} (out-fdm);
\draw[arrow] (dec-class) -| node[anchor=south,pos=.25] {hyperbolic} (proc-linearity);
\draw[arrow] (proc-linearity) -- (dec-hyperbolic);
\draw[arrow] (dec-hyperbolic) -- node[anchor=west, text width=2cm] {Quasilinear, Nonlinear} (out-fvm);
\draw[arrow] (dec-hyperbolic) -| node[anchor=north,pos=.25, text width = 2cm] {Semilinear, Linear} (out-dgm);
\draw[arrow] (out-cgm) -- (dec-done);
\draw[arrow] (out-fvm) -- (dec-done);
\draw[arrow] (out-fdm) -- (dec-done);
\draw[arrow] (out-dgm) -- (dec-done);
\draw[arrow] (dec-done) -| node[anchor=north,pos=.1] {no} ([shift={(-6.2cm,0mm)}]dec-done.west)-- ([shift={(-5.5cm,0mm)}]classify-pde.west)|- (advance);
\draw[arrow] (advance) -- (classify-pde);
\draw[arrow] (dec-done) -- node[anchor=east] {yes} (stop);
\draw[dashed,arrow] (out-dgm) |- node[anchor=south,text width=3.5cm,pos=.75] {For massively parallel problems, apply to all fields} (stop);
\end{tikzpicture}
}%end resizebox
\caption{Graphic summary of the proposed process for choosing appropriate numerical schemes. Inputs (\textbf{I}) are given by purple trapezoids, decision points (\textbf{D}) by white diamonds and processes (\textbf{P}) by orange rectangles. Processes with additional vertical bars denote more complex processes and have references to their respective sections. Results are shown in green trapezoids.}
\label{fig:decision-process}
\end{figure}
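For reference, the field-wise decision process depicted in Figure \ref{fig:decision-process} may also be condensed into a short Python sketch; the plain-string encoding of the inputs below is purely illustrative and not part of any library.
\begin{verbatim}
# Minimal sketch of the field-wise decision process summarized in the flow
# chart above; the string encoding of the inputs is an illustrative assumption.
def recommend_scheme(pde_class, linearity, geometry, massively_parallel):
    if massively_parallel:                         # D1: parallel hardware or multiscale problem
        return "HDG (apply to all fields)"
    if pde_class == "hyperbolic":                  # D2: convective character
        if linearity in ("linear", "semilinear"):  # D4: linearity
            return "DG"
        return "FV"                                # quasilinear or fully nonlinear
    # elliptic or parabolic: continuous character, check domain regularity (D3)
    if geometry == "cartesian":
        return "FDM"
    return "CGM"

# usage sketch: one call per independent field of the multiphysics problem
print(recommend_scheme("hyperbolic", "quasilinear", "irregular", False))   # -> FV
print(recommend_scheme("parabolic", "linear", "cartesian", False))         # -> FDM
\end{verbatim}
In a multiphysics setting, the function would be applied once per independent field, in line with decision point \textbf{D5} in the flow chart.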
\section{Examples}
The purpose of this section is to walk through the proposed method employing two simple example PDEs.
Although these are not multiphysics problems, they may be combined in theory.
\subsection{Allen Cahn Equation}\label{Examples-Allen-Cahn-Equation}