-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathindex.html
478 lines (429 loc) · 29 KB
/
index.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>LeanDojo: Theorem Proving with Retrieval-Augmented Language Models</title>
<meta name="description" content="LeanDojo: an open-source playground for theorem proving in Lean, consisting of toolkits, data, models, and benchmarks.">
<link rel="apple-touch-icon" sizes="180x180" href="/apple-touch-icon.png">
<link rel="icon" type="image/png" sizes="32x32" href="/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="16x16" href="/favicon-16x16.png">
<link rel="manifest" href="/site.webmanifest">
<meta name="msapplication-TileColor" content="#da532c">
<meta name="theme-color" content="#ffffff">
<!-- NOTE(review): the original polyfill.io domain was compromised in a 2024
     supply-chain attack; the Cloudflare-hosted mirror serves the same bundles. -->
<script src="https://cdnjs.cloudflare.com/polyfill/v3/polyfill.min.js?features=es6"></script>
<script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
<link href="https://fonts.googleapis.com/css?family=Google+Sans|Noto+Sans|Castoro"
rel="stylesheet">
<link rel="stylesheet" href="./static/css/bulma.min.css">
<link rel="stylesheet" href="./static/css/bulma-carousel.min.css">
<link rel="stylesheet" href="./static/css/bulma-slider.min.css">
<link rel="stylesheet" href="./static/css/fontawesome.all.min.css">
<!-- Academicons is served locally; the former duplicate jsDelivr copy was redundant. -->
<link rel="stylesheet" href="./static/css/academicons.min.css">
<link rel="stylesheet" href="./static/css/index.css">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
<script defer src="./static/js/fontawesome.all.min.js"></script>
<!--
<script src="./static/js/bulma-carousel.min.js"></script>
<script src="./static/js/bulma-slider.min.js"></script>
<script src="./static/js/index.js"></script>
-->
</head>
<body>
<section class="hero">
<div class="hero-body">
<div class="container is-max-desktop">
<div class="columns is-centered">
<div class="column has-text-centered">
<h1 class="title is-1 publication-title">LeanDojo: Theorem Proving in Lean using Language Models</h1>
<!-- <h3 class="title is-4 conference-authors"><a target="_blank" href="https://nips.cc/">NeurIPS 2023 (Datasets and Benchmarks Track), Oral presentation</a></h3> -->
<!--
<div class="is-size-5 publication-authors">
<span class="author-block">
<a target="_blank" href="https://yangky11.github.io/">Kaiyu Yang</a> <a href="mailto:[email protected]"><i class="fas fa-envelope"></i></a><sup>1</sup>,
<a target="_blank" href="https://aidanswope.com/about">Aidan Swope</a><sup>2</sup>,
<a target="_blank"
href="https://minimario.github.io/">Alex Gu</a><sup>3</sup>,
<a target="_blank" href="https://rchalamala.github.io/">Rahul Chalamala</a><sup>1</sup>,
<a target="_blank" href="https://peiyang-song.github.io/">Peiyang Song</a><sup>4</sup>,
<a target="_blank"
href="https://billysx.github.io/">Shixing Yu</a><sup>5</sup>,
<br>
<a target="_blank" href="https://www.linkedin.com/in/saad-godil-9728353/">Saad Godil</a><sup>2</sup>,
<a target="_blank" href="https://www.linkedin.com/in/ryan-prenger-18797ba1/">Ryan Prenger</a><sup>2</sup>,
<a target="_blank" href="http://tensorlab.cms.caltech.edu/users/anima/">Anima Anandkumar</a><sup>1 2</sup>
</span>
</div>
<div class="is-size-5 publication-authors">
<span class="author-block"><sup>1</sup>Caltech; </span>
<span class="author-block"><sup>2</sup>NVIDIA; </span>
<span class="author-block"><sup>3</sup>MIT; </span>
<span class="author-block"><sup>4</sup>UC Santa Barbara; </span>
<span class="author-block"><sup>5</sup>UT Austin </span>
</div>
<div class="is-size-5 publication-authors">
<span class="author-block"><i class="fas fa-envelope"></i> Correspondence to: [email protected]</span>
</div>
-->
<div class="column has-text-centered">
<div class="publication-links">
<!-- NOTE(review): the link-block spans were previously mis-nested (several buttons
     shared or straddled spans); each button now sits in its own link-block. -->
<span class="link-block">
<a target="_blank" href="https://arxiv.org/abs/2306.15626"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="ai ai-arxiv"></i>
</span>
<span>arXiv</span>
</a>
</span>
<span class="link-block">
<a target="_blank" href="https://github.com/lean-dojo"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="fab fa-github"></i>
</span>
<span>Code</span>
</a>
</span>
<span class="link-block">
<a target="_blank" href="https://leandojo.readthedocs.io/en/latest/index.html"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="fas fa-book"></i>
</span>
<span>Docs</span>
</a>
</span>
<span class="link-block">
<a target="_blank" href="https://github.com/lean-dojo/ReProver"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="fa fa-network-wired"></i>
</span>
<span>Models</span>
</a>
</span>
<span class="link-block">
<a target="_blank" href="https://zenodo.org/doi/10.5281/zenodo.8016385"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="fas fa-database"></i>
</span>
<span>Dataset (Lean 3)</span>
</a>
</span>
<span class="link-block">
<a target="_blank" href="https://zenodo.org/doi/10.5281/zenodo.8040109"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="fas fa-database"></i>
</span>
<span>Dataset (Lean 4)</span>
</a>
</span>
<span class="link-block">
<a target="_blank" href="https://github.com/lean-dojo/LeanCopilot"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="fas fa-wrench"></i>
</span>
<span>Lean Copilot</span>
</a>
</span>
</div>
</div>
</div>
</div>
</div>
</div>
</section>
<section class="section">
<div class="container is-max-widescreen">
<div class="rows">
<div class="rows is-centered ">
<div class="row is-full-width">
<h2 class="title is-3"><span class="lean-infer">LLMs as Copilots for Theorem Proving</span></h2>
<!-- NOTE(review): the space in the file name is percent-encoded per RFC 3986 so the
     URL is valid; consider renaming the asset to avoid spaces entirely.
     TODO: add a <track kind="captions"> if a caption file for this demo exists. -->
<video controls>
<source src="images/Lean%20Copilot%20demo.mp4" type="video/mp4">
Your browser does not support HTML video.
</video>
<br>
<p style="font-size: 125%">We introduce <a target="_blank" href="https://github.com/lean-dojo/LeanCopilot">Lean Copilot</a> for LLMs to act as copilots in Lean for proof automation, e.g., suggesting tactics/premises and searching for proofs. Users can use our model or bring their own models that run either locally (w/ or w/o GPUs) or on the cloud.</p>
</div>
</div>
</div>
</div>
</section>
<!-- Overview figure plus three explanatory paragraphs (top right / top left / bottom of the figure). -->
<section class="section">
<div class="container is-max-widescreen">
<div class="rows">
<div class="rows is-centered">
<div class="row is-full-width">
<h2 class="title is-3"><span class="lean-infer">Overview of LeanDojo</span></h2>
<!-- Void elements carry no closing slash per the HTML living standard. -->
<img class="interpolation-image" src="images/LeanDojo.jpg" alt="" style="display: block; margin-left: auto; margin-right: auto">
<br>
<p style="font-size: 125%"><b>Top right</b>: LeanDojo extracts proofs in <a target="_blank" href="https://leanprover.github.io/">Lean</a> into datasets for training machine learning models. It also enables the trained model to prove theorems by interacting with Lean's proof environment.</p>
<br>
<p style="font-size: 125%"><b>Top left</b>: The proof tree of a Lean theorem <a target="_blank" href="https://github.com/leanprover-community/lean/blob/cce7990ea86a78bdb383e38ed7f9b5ba93c60ce0/library/init/data/nat/gcd.lean#L36">\(\forall n \in \mathbb{N},~\texttt{gcd n n = n}\)</a>, where <span style="background-color:rgb(222, 235, 247);">\(\texttt{gcd}\)</span> is the greatest common divisor. When proving the theorem, we start from the original theorem as the initial state (the root) and repeatedly apply tactics (the edges) to decompose states into simpler sub-states, until all states are solved (the leaf nodes). Tactics may rely on premises such as <span style="background-color:rgb(251, 229, 214);">\(\texttt{mod_self}\)</span> and <span style="background-color:rgb(226, 240, 217);">\(\texttt{gcd_zero_left}\)</span> defined in a large math library. E.g., <span style="background-color:rgb(251, 229, 214);">\(\texttt{mod_self}\)</span> is an existing theorem <a target="_blank" href="https://github.com/leanprover-community/lean/blob/cce7990ea86a78bdb383e38ed7f9b5ba93c60ce0/library/init/data/nat/lemmas.lean#L861">\(\forall n \in \mathbb{N},~\texttt{n % n = 0}\)</a> used in the proof to simplify the goal.</p>
<br>
<p style="font-size: 125%"><b>Bottom</b>: Our ReProver model. Given a state, it retrieves premises from the math library, which are concatenated with the state and fed into an encoder-decoder Transformer to generate the next tactic.</p>
</div>
</div>
</div>
</div>
</section>
<!--
<section class="section">
<div class="container is-max-desktop">
<div class="columns is-centered has-text-centered">
<div class="column">
<h2 class="title is-3">Abstract</h2>
<div class="content has-text-justified">
<p style="font-size: 125%">
Large language models (LLMs) have shown promise in proving formal theorems using proof assistants such as Lean.
However, existing methods are difficult to reproduce or build on, due to private code, data, and large compute
requirements. This has created substantial barriers to research on machine learning methods for theorem proving.
This paper removes these barriers by introducing <em>LeanDojo</em>: an open-source Lean playground consisting of toolkits,
data, models, and benchmarks. LeanDojo extracts data from Lean and enables interaction with the proof environment programmatically.
It contains fine-grained annotations of premises in proofs, providing valuable data for <em>premise selection</em>—
a key bottleneck in theorem proving. Using this data, we develop <em>ReProver</em> (<u>Re</u>trieval-Augmented <u>Prover</u>):
an LLM-based prover that is augmented with retrieval for selecting premises from a vast math library.
It is inexpensive and needs only one GPU week of training. Our retriever leverages LeanDojo's program analysis
capability to identify accessible premises and hard negative examples, which makes retrieval much more effective.
Furthermore, we construct a new benchmark consisting of 98,641 theorems and proofs extracted from Lean's math library.
It features challenging data split requiring the prover to generalize to theorems relying on novel premises that are never
used in training. We use this benchmark for training and evaluation, and experimental results demonstrate the effectiveness
of ReProver over non-retrieval baselines and GPT-4. <b>We thus provide the first set of open-source LLM-based theorem provers without
any proprietary datasets and release it under a permissive MIT license to facilitate further research.</b>
</p>
</div>
</div>
</div>
</div>
</section>
-->
<section class="section">
<div class="container is-max-widescreen">
<div class="rows">
<div class="rows is-centered ">
<div class="row is-full-width">
<h2 class="title is-3"><span class="dvima">Benchmarks</span></h2>
<!-- NOTE(review): the <ul> was previously wrapped in a <p>, which is invalid — a <p>
     may contain only phrasing content, so the parser auto-closed it and left a stray
     </p>. The list now stands on its own; its inline style already sets the font size. -->
<ul style="font-size: 125%; padding-left: 5%">
<li><a href="https://doi.org/10.5281/zenodo.8016385"><img src="https://zenodo.org/badge/DOI/10.5281/zenodo.8016385.svg" alt="DOI"></a> <b>LeanDojo Benchmark</b>: 98,734 theorems/proofs, 217,776 tactics, and 130,262 premises from <a target="_blank" href="https://github.com/leanprover-community/mathlib/tree/19c869efa56bbb8b500f2724c0b77261edbfa28c">mathlib</a>.</li>
<li><a href="https://doi.org/10.5281/zenodo.8040109"><img src="https://zenodo.org/badge/DOI/10.5281/zenodo.8040109.svg" alt="DOI"></a> <b>LeanDojo Benchmark 4</b>: 122,517 theorems/proofs, 259,580 tactics, and 167,779 premises from <a target="_blank" href="https://github.com/leanprover-community/mathlib4/tree/29dcec074de168ac2bf835a77ef68bbe069194c5">mathlib4</a>.</li>
</ul>
<br>
<p style="font-size: 125%">LeanDojo can extract data from any GitHub repos in Lean (supporting both Lean 3 and Lean 4). The data contains rich information not directly visible in the raw Lean code, including file dependencies, abstract syntax trees (ASTs), proof states, tactics, and premises.</p>
<br>
<p style="font-size: 125%"><b>Key feature 1: Premise information</b>: LeanDojo Benchmark contains fine-grained annotations of premises (where they are used in proofs and where they are defined in the library), providing valuable data for premise selection—a key bottleneck in theorem proving.</p>
<br>
<p style="font-size: 125%"><b>Key feature 2: Challenging data split</b>: Splitting theorems randomly into training/testing leads to overestimated performance. LLMs can prove seemingly difficult theorems simply by memorizing the proofs of similar theorems during training. We mitigate this issue by designing challenging data split requiring the model to generalize to theorems relying on novel premises that are never used in training.</p>
</div>
</div>
</div>
</div>
</section>
<!-- Diagram and description of LeanDojo's gym-like interaction environment. -->
<section class="section">
<div class="container is-max-widescreen">
<div class="rows">
<div class="rows is-centered">
<div class="row is-full-width">
<h2 class="title is-3"><span class="dvima">Interacting with Lean</span></h2>
<!-- Void elements carry no closing slash per the HTML living standard. -->
<img class="interpolation-image" src="images/interaction.jpg" width="50%" alt="" style="display: block; margin-left: auto; margin-right: auto">
<p style="font-size: 125%">LeanDojo turns Lean into a <a target="_blank" href="https://github.com/openai/gym">gym-like</a> environment, in which the prover can observe the proof state, run tactics to change the state, and receive feedback on errors or on proof completion. This environment is indispensable for evaluating/deploying the prover or training it through RL.</p>
</div>
</div>
</div>
</div>
</section>
<!--
<section class="section">
<div class="container is-max-widescreen">
<div class="rows">
<div class="rows is-centered ">
<div class="row is-full-width">
<h2 class="title is-3"><span class="dvima">LeanDojo Benchmark</span></h2>
<img src="images/quaternion.jpg" class="interpolation-image" alt="" style="display: block; margin-left: auto; margin-right: auto"/>
</div>
</div>
</div>
</div>
</section>
<section class="section">
<div class="container is-max-widescreen">
<div class="rows">
<div class="rows is-centered ">
<div class="row is-full-width">
<h2 class="title is-3"><span class="dvima">ReProver: Retrieval-Augmented Theorem Prover</span></h2>
</div>
</div>
</div>
</div>
</section>
-->
<section class="section">
<div class="container is-max-widescreen">
<div class="rows">
<div class="rows is-centered ">
<div class="row is-full-width">
<h2 class="title is-3"><span class="dvima">Experiments</span></h2>
<img src="images/results.png" width="40%" class="interpolation-image" alt="" style="display: block; margin-left: auto; margin-right: auto">
<br>
<!-- NOTE(review): fixed typos in the original copy ("spilt" -> "split",
     "percentage of theorem proved" -> "percentage of theorems proved"). -->
<p style="font-size: 125%">We use LeanDojo Benchmark to train and evaluate ReProver. During testing, the tactic generator is combined with best-first search to search for complete proofs. The table shows the percentage of theorems proved within 10 minutes. The \(\texttt{novel_premises}\) split is much more challenging than the \(\texttt{random}\) split. On both data splits, ReProver outperforms Lean's built-in proof automation tactic (tidy), a baseline that generates tactics directly without retrieval, and another baseline using GPT-4 to generate tactics in a zero-shot manner.</p>
</div>
</div>
</div>
</div>
</section>
<section class="section">
<div class="container is-max-widescreen">
<div class="rows">
<div class="rows is-centered ">
<div class="row is-full-width">
<h2 class="title is-3"><span class="dvima">Discovering New Proofs and Uncovering Formalization Bugs</span></h2>
<!-- The two links point to the upstream miniF2F and ProofNet pull requests that
     contributed the machine-discovered proofs. -->
<p style="font-size: 125%">We evaluate ReProver on theorems in miniF2F and ProofNet. It discovers <a target="_blank" href="https://github.com/facebookresearch/miniF2F/pull/13">33 proofs in miniF2F</a> and <a target="_blank" href="https://github.com/zhangir-azerbayev/ProofNet/pull/14">39 proofs in ProofNet</a> of theorems that did not have existing proofs in Lean. Our proofs have helped ProofNet uncover multiple bugs in the formalization of theorem statements.</p>
</div>
</div>
</div>
</div>
</section>
<section class="section">
<div class="container is-max-widescreen">
<div class="rows">
<div class="rows is-centered ">
<div class="row is-full-width">
<h2 class="title is-3"><span class="dvima" id="chatgpt-for-theorem-proving">ChatGPT for Theorem Proving</span></h2>
<!-- NOTE(review): the deprecated align="center" attribute is replaced with inline CSS.
     TODO: add <track kind="captions"> to the demo videos if caption files exist. -->
<div class="columns">
<div class="column has-text-left video-column">
<video controls muted width="100%">
<source src="images/chatgpt-1.mp4" type="video/mp4">
</video>
<div style="text-align: center;">
<a target="_blank" href="https://github.com/yangky11/lean-example/blob/5a0360e49946815cb53132638ccdd46fb1859e2a/src/example.lean#L3">\(a + b + c = a + c + b\)</a>
</div>
</div>
<div class="column has-text-left video-column">
<video controls muted width="100%">
<source src="images/chatgpt-2.mp4" type="video/mp4">
</video>
<div style="text-align: center;">
<a target="_blank" href="https://github.com/leanprover-community/mathlib/blob/19c869efa56bbb8b500f2724c0b77261edbfa28c/src/analysis/special_functions/stirling.lean#L252">Stirling’s formula</a>
</div>
</div>
<div class="column has-text-left video-column">
<video controls muted width="100%">
<source src="images/chatgpt-3.mp4" type="video/mp4">
</video>
<div style="text-align: center;">
<a target="_blank" href="https://github.com/leanprover-community/mathlib/blob/19c869efa56bbb8b500f2724c0b77261edbfa28c/src/algebra/big_operators/intervals.lean#L195">Gauss' summation formula</a>
</div>
</div>
</div>
<br>
<p style="font-size: 125%">We build a <a target="_blank" href="https://github.com/lean-dojo/LeanDojoChatGPT">LeanDojo ChatGPT plugin</a> that enables ChatGPT to prove theorems by interacting with Lean. Compared to specialized LLMs finetuned for theorem proving (e.g., ReProver), ChatGPT can interleave informal mathematics with formal proof steps, similar to how humans interact with proof assistants. It can explain error messages from Lean and is more steerable (by prompt engineering) than specialized provers. However, it struggles to find correct proofs in most cases, due to weakness in search and planning.</p>
</div>
</div>
</div>
</div>
</section>
<!-- Team gallery: two rows of linked portrait cards (5 + 4). Portrait images use
     alt="" deliberately — each link already carries the person's name as text, so a
     non-empty alt would be announced twice by screen readers. -->
<section class="section">
<div class="container is-max-widescreen">
<div class="rows">
<div class="rows is-centered">
<div class="row is-full-width">
<h2 class="title is-3"><span class="dvima">Team</span></h2>
<div class="columns" style="max-width: 95%; padding-left: 10%;">
<div class="column has-text-centered video-column">
<a href="https://yangky11.github.io/" target="_blank" style="border-bottom: none;">
<span class="image" style="padding-left: 10%;"><img src="images/Kaiyu.jpg" style="width: 90%;" alt=""></span>
<span style="font-weight: bold;">Kaiyu Yang</span>
</a>
</div>
<div class="column has-text-centered video-column">
<a href="https://aidanswope.com/about" target="_blank" style="border-bottom: none;">
<span class="image" style="padding-left: 10%;"><img src="images/Aidan.jpg" style="width: 90%;" alt=""></span>
<span style="font-weight: bold;">Aidan Swope</span>
</a>
</div>
<div class="column has-text-centered video-column">
<a href="https://minimario.github.io/" target="_blank" style="border-bottom: none;">
<span class="image" style="padding-left: 10%;"><img src="images/Alex.jpg" style="width: 90%;" alt=""></span>
<span style="font-weight: bold;">Alex Gu</span>
</a>
</div>
<div class="column has-text-centered video-column">
<a href="https://rchalamala.github.io/" target="_blank" style="border-bottom: none;">
<span class="image" style="padding-left: 10%;"><img src="images/Rahul.jpg" style="width: 90%;" alt=""></span>
<span style="font-weight: bold;">Rahul Chalamala</span>
</a>
</div>
<div class="column has-text-centered video-column">
<a href="https://peiyang-song.github.io/" target="_blank" style="border-bottom: none;">
<span class="image" style="padding-left: 10%;"><img src="images/Peiyang.jpg" style="width: 90%;" alt=""></span>
<span style="font-weight: bold;">Peiyang Song</span>
</a>
</div>
</div>
<br>
<div class="columns" style="max-width: 80%; padding-left: 10%;">
<div class="column has-text-centered video-column">
<a href="https://billysx.github.io/" target="_blank" style="border-bottom: none;">
<span class="image" style="padding-left: 10%;"><img src="images/Shixing.jpg" style="width: 90%;" alt=""></span>
<span style="font-weight: bold;">Shixing Yu</span>
</a>
</div>
<div class="column has-text-centered video-column">
<a href="https://www.linkedin.com/in/saad-godil-9728353/" target="_blank" style="border-bottom: none;">
<span class="image" style="padding-left: 10%;"><img src="images/Saad.jpg" style="width: 90%;" alt=""></span>
<span style="font-weight: bold;">Saad Godil</span>
</a>
</div>
<div class="column has-text-centered video-column">
<a href="https://www.linkedin.com/in/ryan-prenger-18797ba1/" target="_blank" style="border-bottom: none;">
<span class="image" style="padding-left: 10%;"><img src="images/Ryan.jpg" style="width: 90%;" alt=""></span>
<span style="font-weight: bold;">Ryan Prenger</span>
</a>
</div>
<div class="column has-text-centered video-column">
<a href="http://tensorlab.cms.caltech.edu/users/anima/" target="_blank" style="border-bottom: none;">
<span class="image" style="padding-left: 10%;"><img src="images/Anima.jpg" style="width: 90%;" alt=""></span>
<span style="font-weight: bold;">Anima Anandkumar</span>
</a>
</div>
</div>
</div>
</div>
</div>
</div>
</section>
<section class="section" id="BibTeX">
<div class="container is-max-widescreen content">
<h2 class="title">BibTeX</h2>
<!-- Citation entries: the NeurIPS 2023 LeanDojo paper and the 2024 Lean Copilot
     preprint. The <pre><code> content is rendered verbatim — keep it byte-exact. -->
<pre><code>@inproceedings{yang2023leandojo,
title={{LeanDojo}: Theorem Proving with Retrieval-Augmented Language Models},
author={Yang, Kaiyu and Swope, Aidan and Gu, Alex and Chalamala, Rahul and Song, Peiyang and Yu, Shixing and Godil, Saad and Prenger, Ryan and Anandkumar, Anima},
booktitle={Neural Information Processing Systems (NeurIPS)},
year={2023}
}
@misc{song2024largelanguagemodelscopilots,
title={Towards Large Language Models as Copilots for Theorem Proving in Lean},
author={Peiyang Song and Kaiyu Yang and Anima Anandkumar},
year={2024},
eprint={2404.12534},
archivePrefix={arXiv},
primaryClass={cs.AI},
url={https://arxiv.org/abs/2404.12534},
}</code></pre>
</div>
</section>
<!-- Page footer: template attribution only. -->
<footer class="footer">
<div class="container">
<div class="columns is-centered">
<div class="column">
<div class="content has-text-centered">
<p>
Website template borrowed from <a
href="https://vimalabs.github.io/">VIMA</a>.
</p>
</div>
</div>
</div>
</div>
</footer>
</body>
</html>