@inproceedings{pqlearn,
author = {Goldwasser, Shafi and Kalai, Adam Tauman and Kalai, Yael and Montasser, Omar},
booktitle = {Advances in Neural Information Processing Systems},
editor = {Larochelle, H. and Ranzato, M. and Hadsell, R. and Balcan, M. F. and Lin, H.},
pages = {15859--15870},
publisher = {Curran Associates, Inc.},
title = {{Beyond Perturbations: Learning Guarantees with Arbitrary Adversarial Test Examples}},
url = {https://proceedings.neurips.cc/paper/2020/file/b6c8cf4c587f2ead0c08955ee6e2502b-Paper.pdf},
volume = {33},
year = {2020}
}
@article{williams1995gaussian,
title={Gaussian processes for regression},
author={Williams, Christopher and Rasmussen, Carl},
journal={Advances in neural information processing systems},
volume={8},
year={1995}
}
@inproceedings{failloud,
author = {Stephan Rabanser and
Stephan G{\"{u}}nnemann and
Zachary C. Lipton},
editor = {Hanna M. Wallach and
Hugo Larochelle and
Alina Beygelzimer and
Florence d'Alch{\'{e}}{-}Buc and
Emily B. Fox and
Roman Garnett},
title = {Failing Loudly: An Empirical Study of Methods for Detecting Dataset
Shift},
booktitle = {Advances in Neural Information Processing Systems 32: Annual Conference
on Neural Information Processing Systems 2019, NeurIPS 2019, December
8-14, 2019, Vancouver, BC, Canada},
pages = {1394--1406},
year = {2019},
url = {https://proceedings.neurips.cc/paper/2019/hash/846c260d715e5b854ffad5f70a516c88-Abstract.html},
timestamp = {Thu, 21 Jan 2021 15:15:19 +0100},
biburl = {https://dblp.org/rec/conf/nips/RabanserGL19.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{bbsd,
title = {Detecting and correcting for label shift with black box predictors},
author = {Lipton, Zachary and Wang, Yu-Xiang and Smola, Alexander},
booktitle = {International conference on machine learning},
pages = {3122--3130},
year = {2018},
organization = {PMLR}
}
@inproceedings{floods,
title = {ML for Flood Forecasting at Scale},
author = {Sella Nevo and Ami Wiesel and Avinatan Hassidim and Gal Elidan and Guy Shalev and Mor Schlesinger and Oleg Zlydenko and Ran El-Yaniv and Yotam Gigi and Zach Moshe and Yossi Matias},
year = {2018},
booktitle = {Proceedings of the NIPS AI for Social Good Workshop}
}
@article{mirai,
author = {Adam Yala and Peter G. Mikhael and Fredrik Strand and Gigin Lin and Kevin Smith and Yung-Liang Wan and Leslie Lamb and Kevin Hughes and Constance Lehman and Regina Barzilay},
title = {Toward robust mammography-based models for breast cancer risk},
journal = {Science Translational Medicine},
volume = {13},
number = {578},
year = {2021},
doi = {10.1126/scitranslmed.aba4373},
url = {https://www.science.org/doi/abs/10.1126/scitranslmed.aba4373},
}
@article{atmos,
article-number = {455},
author = {Asthana, Tanmay and Krim, Hamid and Sun, Xia and Roheda, Siddharth and Xie, Lian},
doi = {10.3390/atmos12040455},
issn = {2073-4433},
journal = {Atmosphere},
number = {4},
title = {Atlantic Hurricane Activity Prediction: A Machine Learning Approach},
url = {https://www.mdpi.com/2073-4433/12/4/455},
volume = {12},
year = {2021},
}
@InProceedings{av_1,
author = {Prakash, Aditya and Chitta, Kashyap and Geiger, Andreas},
title = {Multi-Modal Fusion Transformer for End-to-End Autonomous Driving},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
month = {June},
year = {2021},
pages = {7077--7087}
}
@misc{googleAR,
title = {Google AI Blog: Real-Time AR Self-Expression with Machine Learning},
howpublished = {\url{https://ai.googleblog.com/2019/03/real-time-ar-self-expression-with.html}},
year = {2019},
note = {(Accessed on 01/19/2022)}
}
@article{uspec1,
author = {Alexander D'Amour and
Katherine A. Heller and
Dan Moldovan and
Ben Adlam and
Babak Alipanahi and
Alex Beutel and
Christina Chen and
Jonathan Deaton and
Jacob Eisenstein and
Matthew D. Hoffman and
Farhad Hormozdiari and
Neil Houlsby and
Shaobo Hou and
Ghassen Jerfel and
Alan Karthikesalingam and
Mario Lucic and
Yi{-}An Ma and
Cory Y. McLean and
Diana Mincu and
Akinori Mitani and
Andrea Montanari and
Zachary Nado and
Vivek Natarajan and
Christopher Nielson and
Thomas F. Osborne and
Rajiv Raman and
Kim Ramasamy and
Rory Sayres and
Jessica Schrouff and
Martin Seneviratne and
Shannon Sequeira and
Harini Suresh and
Victor Veitch and
Max Vladymyrov and
Xuezhi Wang and
Kellie Webster and
Steve Yadlowsky and
Taedong Yun and
Xiaohua Zhai and
D. Sculley},
title = {Underspecification Presents Challenges for Credibility in Modern Machine
Learning},
journal = {CoRR},
volume = {abs/2011.03395},
year = {2020},
url = {https://arxiv.org/abs/2011.03395},
eprinttype = {arXiv},
eprint = {2011.03395},
timestamp = {Thu, 14 Oct 2021 09:17:26 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-2011-03395.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{uspec2,
author = {David Madras and
James Atwood and
Alex D'Amour},
title = {Detecting Extrapolation with Local Ensembles},
journal = {CoRR},
volume = {abs/1910.09573},
year = {2019},
url = {http://arxiv.org/abs/1910.09573},
eprinttype = {arXiv},
eprint = {1910.09573},
timestamp = {Fri, 25 Oct 2019 14:59:26 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-1910-09573.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{cancer1,
author = {Cao, Ruiming and Mohammadian Bajgiran, Amirhossein and Afshari Mirak, Sohrab and Shakeri, Sepideh and Zhong, Xinran and Enzmann, Dieter and Raman, Steven and Sung, Kyunghyun},
journal = {IEEE Transactions on Medical Imaging},
title = {Joint Prostate Cancer Detection and Gleason Score Prediction in mp-MRI via FocalNet},
year = {2019},
volume = {38},
number = {11},
pages = {2496--2506},
doi = {10.1109/TMI.2019.2901928}
}
@article{cancer2,
author = {Capper, David and Jones, David T. W. and Sill, Martin and Hovestadt, Volker and Schrimpf, Daniel and Sturm, Dominik and Koelsche, Christian and Sahm, Felix and Chavez, Lukas and Reuss, David E. and Kratz, Annekathrin and Wefers, Annika K. and Huang, Kristin and Pajtler, Kristian W. and Schweizer, Leonille and Stichel, Damian and Olar, Adriana and Engel, Nils W. and Lindenberg, Kerstin and Harter, Patrick N. and Braczynski, Anne K. and Plate, Karl H. and Dohmen, Hildegard and Garvalov, Boyan K. and Coras, Roland and H{\"o}lsken, Annett and Hewer, Ekkehard and Bewerunge-Hudler, Melanie and Schick, Matthias and Fischer, Roger and Beschorner, Rudi and Schittenhelm, Jens and Staszewski, Ori and Wani, Khalida and Varlet, Pascale and Pages, Melanie and Temming, Petra and Lohmann, Dietmar and Selt, Florian and Witt, Hendrik and Milde, Till and Witt, Olaf and Aronica, Eleonora and Giangaspero, Felice and Rushing, Elisabeth and Scheurlen, Wolfram and Geisenberger, Christoph and Rodriguez, Fausto J. and Becker, Albert and Preusser, Matthias and Haberler, Christine and Bjerkvig, Rolf and Cryan, Jane and Farrell, Michael and Deckert, Martina and Hench, J{\"u}rgen and Frank, Stephan and Serrano, Jonathan and Kannan, Kasthuri and Tsirigos, Aristotelis and Br{\"u}ck, Wolfgang and Hofer, Silvia and Brehmer, Stefanie and Seiz-Rosenhagen, Marcel and H{\"a}nggi, Daniel and Hans, Volkmar and Rozsnoki, Stephanie and Hansford, Jordan R. and Kohlhof, Patricia and Kristensen, Bjarne W. and Lechner, Matt and Lopes, Beatriz and Mawrin, Christian and Ketter, Ralf and Kulozik, Andreas and Khatib, Ziad and Heppner, Frank and Koch, Arend and Jouvet, Anne and Keohane, Catherine and M{\"u}hleisen, Helmut and Mueller, Wolf and Pohl, Ute and Prinz, Marco and Benner, Axel and Zapatka, Marc and Gottardo, Nicholas G. and Driever, Pablo Hern{\'a}iz and Kramm, Christof M. and M{\"u}ller, Hermann L. and Rutkowski, Stefan and von Hoff, Katja and Fr{\"u}hwald, Michael C. and Gnekow, Astrid and Fleischhack, Gudrun and Tippelt, Stephan and Calaminus, Gabriele and Monoranu, Camelia-Maria and Perry, Arie and Jones, Chris and Jacques, Thomas S. and Radlwimmer, Bernhard and Gessi, Marco and Pietsch, Torsten and Schramm, Johannes and Schackert, Gabriele and Westphal, Manfred and Reifenberger, Guido and Wesseling, Pieter and Weller, Michael and Collins, Vincent Peter and Bl{\"u}mcke, Ingmar and Bendszus, Martin and Debus, J{\"u}rgen and Huang, Annie and Jabado, Nada and Northcott, Paul A. and Paulus, Werner and Gajjar, Amar and Robinson, Giles W. and Taylor, Michael D. and Jaunmuktane, Zane and Ryzhova, Marina and Platten, Michael and Unterberg, Andreas and Wick, Wolfgang and Karajannis, Matthias A. and Mittelbronn, Michel and Acker, Till and Hartmann, Christian and Aldape, Kenneth and Sch{\"u}ller, Ulrich and Buslei, Rolf and Lichter, Peter and Kool, Marcel and Herold-Mende, Christel and Ellison, David W. and Hasselblatt, Martin and Snuderl, Matija and Brandner, Sebastian and Korshunov, Andrey and von Deimling, Andreas and Pfister, Stefan M.},
journal = {Nature},
number = {7697},
pages = {469--474},
title = {DNA methylation-based classification of central nervous system tumours},
volume = {555},
year = {2018}
}
@article{cancer3,
author = {Ardila, Diego and Kiraly, Atilla P. and Bharadwaj, Sujeeth and Choi, Bokyung and Reicher, Joshua J. and Peng, Lily and Tse, Daniel and Etemadi, Mozziyar and Ye, Wenxing and Corrado, Greg and Naidich, David P. and Shetty, Shravya},
journal = {Nature Medicine},
number = {6},
pages = {954--961},
title = {End-to-end lung cancer screening with three-dimensional deep learning on low-dose chest computed tomography},
volume = {25},
year = {2019}
}
@inproceedings{disaster1,
author = {Arinta, Rania Rizki and Andi W.R., Emanuel},
booktitle = {2019 4th International Conference on Information Technology, Information Systems and Electrical Engineering (ICITISEE)},
title = {Natural Disaster Application on Big Data and Machine Learning: A Review},
year = {2019},
pages = {249--254},
doi = {10.1109/ICITISEE48480.2019.9003984}
}
@article{disaster2,
author = {Jeong, Mira and Park, MinJi and Nam, Jaeyeal and Ko, Byoung Chul},
journal = {Sensors},
number = {19},
title = {Light-Weight Student LSTM for Real-Time Wildfire Smoke Detection},
volume = {20},
year = {2020}
}
@article{shiftrob,
author = {Rohan Taori and
Achal Dave and
Vaishaal Shankar and
Nicholas Carlini and
Benjamin Recht and
Ludwig Schmidt},
title = {Measuring Robustness to Natural Distribution Shifts in Image Classification},
journal = {CoRR},
volume = {abs/2007.00644},
year = {2020},
url = {https://arxiv.org/abs/2007.00644},
eprinttype = {arXiv},
eprint = {2007.00644},
timestamp = {Mon, 06 Jul 2020 15:26:01 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-2007-00644.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{audit,
author = {Vicente, Javier and Tortajada, Salvador and Fuster-Garcia, Elies and García-Gómez, Juan Miguel and Robles, Montserrat},
booktitle = {2012 6th IEEE International Conference Intelligent Systems},
title = {An audit method suited for decision support systems for clinical environment},
year = {2012},
pages = {281--288},
doi = {10.1109/IS.2012.6335149}
}
@inproceedings{ensemble,
author = {Lakshminarayanan, Balaji and Pritzel, Alexander and Blundell, Charles},
title = {Simple and Scalable Predictive Uncertainty Estimation Using Deep Ensembles},
year = {2017},
isbn = {9781510860964},
publisher = {Curran Associates Inc.},
address = {Red Hook, NY, USA},
abstract = {Deep neural networks (NNs) are powerful black box predictors that have recently achieved impressive performance on a wide spectrum of tasks. Quantifying predictive uncertainty in NNs is a challenging and yet unsolved problem. Bayesian NNs, which learn a distribution over weights, are currently the state-of-the-art for estimating predictive uncertainty; however these require significant modifications to the training procedure and are computationally expensive compared to standard (non-Bayesian) NNs. We propose an alternative to Bayesian NNs that is simple to implement, readily parallelizable, requires very little hyperparameter tuning, and yields high quality predictive uncertainty estimates. Through a series of experiments on classification and regression benchmarks, we demonstrate that our method produces well-calibrated uncertainty estimates which are as good or better than approximate Bayesian NNs. To assess robustness to dataset shift, we evaluate the predictive uncertainty on test examples from known and unknown distributions, and show that our method is able to express higher uncertainty on out-of-distribution examples. We demonstrate the scalability of our method by evaluating predictive uncertainty estimates on ImageNet.},
booktitle = {Proceedings of the 31st International Conference on Neural Information Processing Systems},
pages = {6405--6416},
numpages = {12},
location = {Long Beach, California, USA},
series = {NIPS'17}
}
@inproceedings{trustuncert,
author = {Ovadia, Yaniv and Fertig, Emily and Ren, Jie and Nado, Zachary and Sculley, D. and Nowozin, Sebastian and Dillon, Joshua and Lakshminarayanan, Balaji and Snoek, Jasper},
booktitle = {Advances in Neural Information Processing Systems},
editor = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d\textquotesingle Alch\'{e}-Buc and E. Fox and R. Garnett},
publisher = {Curran Associates, Inc.},
title = {Can you trust your model\textquotesingle s uncertainty? Evaluating predictive uncertainty under dataset shift},
volume = {32},
year = {2019}
}
@inproceedings{bayesCV,
author = {Kendall, Alex and Gal, Yarin},
title = {What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?},
year = {2017},
isbn = {9781510860964},
publisher = {Curran Associates Inc.},
address = {Red Hook, NY, USA},
abstract = {There are two major types of uncertainty one can model. Aleatoric uncertainty captures noise inherent in the observations. On the other hand, epistemic uncertainty accounts for uncertainty in the model - uncertainty which can be explained away given enough data. Traditionally it has been difficult to model epistemic uncertainty in computer vision, but with new Bayesian deep learning tools this is now possible. We study the benefits of modeling epistemic vs. aleatoric uncertainty in Bayesian deep learning models for vision tasks. For this we present a Bayesian deep learning framework combining input-dependent aleatoric uncertainty together with epistemic uncertainty. We study models under the framework with per-pixel semantic segmentation and depth regression tasks. Further, our explicit uncertainty formulation leads to new loss functions for these tasks, which can be interpreted as learned attenuation. This makes the loss more robust to noisy data, also giving new state-of-the-art results on segmentation and depth regression benchmarks.},
booktitle = {Proceedings of the 31st International Conference on Neural Information Processing Systems},
pages = {5580--5590},
numpages = {11},
location = {Long Beach, California, USA},
series = {NIPS'17}
}
@article{concept,
title = {Concept drift detection and adaptation with hierarchical hypothesis testing},
author = {Yu, Shujian and Abraham, Zubin and Wang, Heng and Shah, Mohak and Wei, Yantao and Pr{\'\i}ncipe, Jos{\'e} C},
journal = {Journal of the Franklin Institute},
volume = {356},
number = {5},
pages = {3187--3215},
year = {2019},
publisher = {Elsevier}
}
@inproceedings{covblackbox,
author = {Alberge, Florence and Feutry, Clément and Duhamel, Pierre and Piantanida, Pablo},
booktitle = {2019 26th International Conference on Telecommunications (ICT)},
title = {Detecting Covariate Shift with Black Box Predictors},
year = {2019},
pages = {324--329},
doi = {10.1109/ICT.2019.8798827}
}
@article{underspec,
author = {Alexander D'Amour and
Katherine A. Heller and
Dan Moldovan and
Ben Adlam and
Babak Alipanahi and
Alex Beutel and
Christina Chen and
Jonathan Deaton and
Jacob Eisenstein and
Matthew D. Hoffman and
Farhad Hormozdiari and
Neil Houlsby and
Shaobo Hou and
Ghassen Jerfel and
Alan Karthikesalingam and
Mario Lucic and
Yi{-}An Ma and
Cory Y. McLean and
Diana Mincu and
Akinori Mitani and
Andrea Montanari and
Zachary Nado and
Vivek Natarajan and
Christopher Nielson and
Thomas F. Osborne and
Rajiv Raman and
Kim Ramasamy and
Rory Sayres and
Jessica Schrouff and
Martin Seneviratne and
Shannon Sequeira and
Harini Suresh and
Victor Veitch and
Max Vladymyrov and
Xuezhi Wang and
Kellie Webster and
Steve Yadlowsky and
Taedong Yun and
Xiaohua Zhai and
D. Sculley},
title = {Underspecification Presents Challenges for Credibility in Modern Machine
Learning},
journal = {CoRR},
volume = {abs/2011.03395},
year = {2020},
url = {https://arxiv.org/abs/2011.03395},
eprinttype = {arXiv},
eprint = {2011.03395},
timestamp = {Thu, 14 Oct 2021 09:17:26 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-2011-03395.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inbook{ushakov_2011,
title = {Continuity theorems and inversion formulas},
booktitle = {Selected topics in characteristic functions},
publisher = {De Gruyter},
author = {Ushakov, Nikolai G.},
year = {2011}
}
@misc{calculus67:online,
title = {calculus - Integral of $(1+\cos(t))^n$ from $-\pi$ to $\pi$ - Mathematics Stack Exchange},
howpublished = {\url{https://math.stackexchange.com/questions/4410358/integral-of-1-costn-from-pi-to-pi?noredirect=1#comment9225952_4410358}},
year = {2022},
note = {(Accessed on 03/22/2022)}
}
@inbook{complex,
address = {Oxford},
title = {Complex Integration: Cauchy's Theorem},
booktitle = {Visual complex analysis},
publisher = {Oxford University Press},
author = {Needham, Tristan},
year = {2000},
pages = {377--417}
}
@inproceedings{failuresofgen,
title = {Understanding failures in out-of-distribution detection with deep generative models},
author = {Zhang, Lily and Goldstein, Mark and Ranganath, Rajesh},
booktitle = {International Conference on Machine Learning},
pages = {12427--12436},
year = {2021},
organization = {PMLR}
}
@article{ganin2016domain,
title = {Domain-adversarial training of neural networks},
author = {Ganin, Yaroslav and Ustinova, Evgeniya and Ajakan, Hana and Germain, Pascal and Larochelle, Hugo and Laviolette, Fran{\c{c}}ois and Marchand, Mario and Lempitsky, Victor},
journal = {The journal of machine learning research},
volume = {17},
number = {1},
pages = {2096--2030},
year = {2016},
publisher = {JMLR. org}
}
@InProceedings{BernhardDomain,
title = {Domain Adaptation with Conditional Transferable Components},
author = {Gong, Mingming and Zhang, Kun and Liu, Tongliang and Tao, Dacheng and Glymour, Clark and Schölkopf, Bernhard},
booktitle = {Proceedings of The 33rd International Conference on Machine Learning},
pages = {2839--2848},
year = {2016},
editor = {Balcan, Maria Florina and Weinberger, Kilian Q.},
volume = {48},
series = {Proceedings of Machine Learning Research},
address = {New York, New York, USA},
month = {20--22 Jun},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v48/gong16.pdf},
url = {https://proceedings.mlr.press/v48/gong16.html},
abstract = {Domain adaptation arises in supervised learning when the training (source domain) and test (target domain) data have different distributions. Let X and Y denote the features and target, respectively, previous work on domain adaptation considers the covariate shift situation where the distribution of the features P(X) changes across domains while the conditional distribution P(Y|X) stays the same. To reduce domain discrepancy, recent methods try to find invariant components \mathcalT(X) that have similar P(\mathcalT(X)) by explicitly minimizing a distribution discrepancy measure. However, it is not clear if P(Y|\mathcalT(X)) in different domains is also similar when P(Y|X) changes. Furthermore, transferable components do not necessarily have to be invariant. If the change in some components is identifiable, we can make use of such components for prediction in the target domain. In this paper, we focus on the case where P(X|Y) and P(Y) both change in a causal system in which Y is the cause for X. Under appropriate assumptions, we aim to extract conditional transferable components whose conditional distribution P(\mathcalT(X)|Y) is invariant after proper location-scale (LS) transformations, and identify how P(Y) changes between domains simultaneously. We provide theoretical analysis and empirical evaluation on both synthetic and real-world data to show the effectiveness of our method.}
}
@InProceedings{wilds,
title = {WILDS: A Benchmark of in-the-Wild Distribution Shifts},
author = {Koh, Pang Wei and Sagawa, Shiori and Marklund, Henrik and Xie, Sang Michael and Zhang, Marvin and Balsubramani, Akshay and Hu, Weihua and Yasunaga, Michihiro and Phillips, Richard Lanas and Gao, Irena and Lee, Tony and David, Etienne and Stavness, Ian and Guo, Wei and Earnshaw, Berton and Haque, Imran and Beery, Sara M and Leskovec, Jure and Kundaje, Anshul and Pierson, Emma and Levine, Sergey and Finn, Chelsea and Liang, Percy},
booktitle = {Proceedings of the 38th International Conference on Machine Learning},
pages = {5637--5664},
year = {2021},
editor = {Meila, Marina and Zhang, Tong},
volume = {139},
series = {Proceedings of Machine Learning Research},
month = {18--24 Jul},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v139/koh21a/koh21a.pdf},
url = {https://proceedings.mlr.press/v139/koh21a.html},
abstract = {Distribution shifts—where the training distribution differs from the test distribution—can substantially degrade the accuracy of machine learning (ML) systems deployed in the wild. Despite their ubiquity in the real-world deployments, these distribution shifts are under-represented in the datasets widely used in the ML community today. To address this gap, we present WILDS, a curated benchmark of 10 datasets reflecting a diverse range of distribution shifts that naturally arise in real-world applications, such as shifts across hospitals for tumor identification; across camera traps for wildlife monitoring; and across time and location in satellite imaging and poverty mapping. On each dataset, we show that standard training yields substantially lower out-of-distribution than in-distribution performance. This gap remains even with models trained by existing methods for tackling distribution shifts, underscoring the need for new methods for training models that are more robust to the types of distribution shifts that arise in practice. To facilitate method development, we provide an open-source package that automates dataset loading, contains default model architectures and hyperparameters, and standardizes evaluations. The full paper, code, and leaderboards are available at https://wilds.stanford.edu.}
}
@inproceedings{paz2017revisiting,
title = {Revisiting Classifier Two-Sample Tests},
author = {David Lopez-Paz and Maxime Oquab},
booktitle = {International Conference on Learning Representations},
year = {2017},
url = {https://openreview.net/forum?id=SJkXfE5xx}
}
@article{learnundercov,
author = {Steffen Bickel and Michael Br{{\"u}}ckner and Tobias Scheffer},
title = {Discriminative Learning Under Covariate Shift},
journal = {Journal of Machine Learning Research},
year = {2009},
volume = {10},
number = {75},
pages = {2137-2155},
url = {http://jmlr.org/papers/v10/bickel09a.html}
}
@inproceedings{covhighreg,
author = {Tripuraneni, Nilesh and Adlam, Ben and Pennington, Jeffrey},
booktitle = {Advances in Neural Information Processing Systems},
editor = {M. Ranzato and A. Beygelzimer and Y. Dauphin and P.S. Liang and J. Wortman Vaughan},
pages = {13883--13897},
publisher = {Curran Associates, Inc.},
title = {Overparameterization Improves Robustness to Covariate Shift in High Dimensions},
url = {https://proceedings.neurips.cc/paper/2021/file/73fed7fd472e502d8908794430511f4d-Paper.pdf},
volume = {34},
year = {2021}
}
@inproceedings{relml4hc,
title = {Reliable and Trustworthy Machine Learning for Health Using Dataset Shift Detection},
author = {Chunjong Park and Anas Awadalla and Tadayoshi Kohno and Shwetak Patel},
booktitle = {Advances in Neural Information Processing Systems},
editor = {A. Beygelzimer and Y. Dauphin and P. Liang and J. Wortman Vaughan},
year = {2021},
url = {https://openreview.net/forum?id=hNMOSUxE8o6}
}
@inproceedings{mahalano,
author = {Lee, Kimin and Lee, Kibok and Lee, Honglak and Shin, Jinwoo},
booktitle = {Advances in Neural Information Processing Systems},
editor = {S. Bengio and H. Wallach and H. Larochelle and K. Grauman and N. Cesa-Bianchi and R. Garnett},
publisher = {Curran Associates, Inc.},
title = {A Simple Unified Framework for Detecting Out-of-Distribution Samples and Adversarial Attacks},
volume = {31},
year = {2018}
}
@InProceedings{grammat,
title = {Detecting Out-of-Distribution Examples with {G}ram Matrices},
author = {Sastry, Chandramouli Shama and Oore, Sageev},
booktitle = {Proceedings of the 37th International Conference on Machine Learning},
pages = {8491--8501},
year = {2020},
editor = {Daum\'e III, Hal and Singh, Aarti},
volume = {119},
series = {Proceedings of Machine Learning Research},
month = {13--18 Jul},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v119/sastry20a/sastry20a.pdf},
url = {https://proceedings.mlr.press/v119/sastry20a.html},
abstract = {When presented with Out-of-Distribution (OOD) examples, deep neural networks yield confident, incorrect predictions; detecting OOD examples is challenging, and the potential risks are high. In this paper, we propose to detect OOD examples by identifying inconsistencies between activity patterns and predicted class. We find that characterizing activity patterns by Gram matrices and identifying anomalies in Gram matrix values can yield high OOD detection rates. We identify anomalies in the Gram matrices by simply comparing each value with its respective range observed over the training data. Unlike many approaches, this can be used with any pre-trained softmax classifier and neither requires access to OOD data for fine-tuning hyperparameters, nor does it require OOD access for inferring parameters. We empirically demonstrate applicability across a variety of architectures and vision datasets and, for the important and surprisingly hard task of detecting far out-of-distribution examples, it generally performs better than or equal to state-of-the-art OOD detection methods (including those that do assume access to OOD examples).}
}
@article{deepknn,
author = {Nicolas Papernot and
Patrick D. McDaniel},
title = {Deep k-Nearest Neighbors: Towards Confident, Interpretable and Robust
Deep Learning},
journal = {CoRR},
volume = {abs/1803.04765},
year = {2018},
url = {http://arxiv.org/abs/1803.04765},
eprinttype = {arXiv},
eprint = {1803.04765},
timestamp = {Mon, 13 Aug 2018 16:48:06 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-1803-04765.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@misc{dropout,
doi = {10.48550/ARXIV.1506.02142},
url = {https://arxiv.org/abs/1506.02142},
author = {Gal, Yarin and Ghahramani, Zoubin},
keywords = {Machine Learning (stat.ML), Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences},
title = {Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning},
publisher = {arXiv},
year = {2015},
copyright = {arXiv.org perpetual, non-exclusive license}
}
@incollection{baysneural,
doi = {10.1007/978-3-030-42553-1_3},
url = {https://doi.org/10.1007%2F978-3-030-42553-1_3},
year = 2020,
publisher = {Springer International Publishing},
pages = {45--87},
author = {Ethan Goan and Clinton Fookes},
title = {Bayesian Neural Networks: An Introduction and Survey},
booktitle = {Case Studies in Applied Bayesian Data Science}
}
@article{evidential,
title = {Deep evidential regression},
author = {Amini, Alexander and Schwarting, Wilko and Soleimany, Ava and Rus, Daniela},
journal = {Advances in Neural Information Processing Systems},
volume = {33},
pages = {14927--14937},
year = {2020}
}
@article{evclass,
title = {Evidential deep learning to quantify classification uncertainty},
author = {Sensoy, Murat and Kaplan, Lance and Kandemir, Melih},
journal = {Advances in neural information processing systems},
volume = {31},
year = {2018}
}
@InProceedings{selectivenet,
title = {{S}elective{N}et: A Deep Neural Network with an Integrated Reject Option},
author = {Geifman, Yonatan and El-Yaniv, Ran},
booktitle = {Proceedings of the 36th International Conference on Machine Learning},
pages = {2151--2159},
year = {2019},
editor = {Chaudhuri, Kamalika and Salakhutdinov, Ruslan},
volume = {97},
series = {Proceedings of Machine Learning Research},
month = {09--15 Jun},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v97/geifman19a/geifman19a.pdf},
url = {https://proceedings.mlr.press/v97/geifman19a.html},
abstract = {We consider the problem of selective prediction (also known as reject option) in deep neural networks, and introduce SelectiveNet, a deep neural architecture with an integrated reject option. Existing rejection mechanisms are based mostly on a threshold over the prediction confidence of a pre-trained network. In contrast, SelectiveNet is trained to optimize both classification (or regression) and rejection simultaneously, end-to-end. The result is a deep neural network that is optimized over the covered domain. In our experiments, we show a consistently improved risk-coverage trade-off over several well-known classification and regression datasets, thus reaching new state-of-the-art results for deep selective classification.}
}
@INPROCEEDINGS{Haussler90probablyapproximately,
author = {David Haussler},
title = {Probably Approximately Correct Learning},
booktitle = {Proceedings of the Eighth National Conference on Artificial Intelligence},
year = {1990},
pages = {1101--1108},
publisher = {AAAI Press}
}
@InProceedings{slicendice,
title = {Efficient Learning with Arbitrary Covariate Shift},
author = {Kalai, Adam Tauman and Kanade, Varun},
booktitle = {Proceedings of the 32nd International Conference on Algorithmic Learning Theory},
pages = {850--864},
year = {2021},
editor = {Feldman, Vitaly and Ligett, Katrina and Sabato, Sivan},
volume = {132},
series = {Proceedings of Machine Learning Research},
month = {16--19 Mar},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v132/kalai21a/kalai21a.pdf},
url = {https://proceedings.mlr.press/v132/kalai21a.html},
abstract = {We give an efficient algorithm for learning a binary function in a given class $C$ of bounded VC dimension, with training data distributed according to $P$ and test data according to $Q$, where $P$ and $Q$ may be arbitrary distributions over $X$. This is the generic form of what is called \textit{covariate shift}, which is impossible in general as arbitrary $P$ and $Q$ may not even overlap. However, recently guarantees were given in a model called PQ-learning (Goldwasser et al., 2020) where the learner has: (a) access to unlabeled test examples from $Q$ (in addition to labeled samples from $P$, i.e., semi-supervised learning); and (b) the option to \textit{reject} any example and abstain from classifying it (i.e., selective classification). The algorithm of Goldwasser et al. (2020) requires an (agnostic) noise-tolerant learner for $C$. The present work gives a polynomial-time PQ-learning algorithm, called \textit{Slice-and-Dice}, that uses an oracle to a “reliable” learner for $C$, where reliable learning (Kalai et al., 2012) is a model of learning with one-sided noise. Furthermore, this reduction is optimal in the sense that we show the equivalence of reliable and PQ learning.}
}
@book{covbook,
author = {Sugiyama, Masashi and Kawanabe, Motoaki},
title = {Machine Learning in Non-Stationary Environments: Introduction to Covariate Shift Adaptation},
year = {2012},
isbn = {0262017091},
publisher = {The MIT Press},
abstract = {As the power of computing has grown over the past few decades, the field of machine learning has advanced rapidly in both theory and practice. Machine learning methods are usually based on the assumption that the data generation mechanism does not change over time. Yet real-world applications of machine learning, including image recognition, natural language processing, speech recognition, robot control, and bioinformatics, often violate this common assumption. Dealing with non-stationarity is one of modern machine learning's greatest challenges. This book focuses on a specific non-stationary environment known as covariate shift, in which the distributions of inputs (queries) change but the conditional distribution of outputs (answers) is unchanged, and presents machine learning theory, algorithms, and applications to overcome this variety of non-stationarity. After reviewing the state-of-the-art research in the field, the authors discuss topics that include learning under covariate shift, model selection, importance estimation, and active learning. They describe such real world applications of covariate shift adaption as brain-computer interface, speaker identification, and age prediction from facial images. With this book, they aim to encourage future research in machine learning, statistics, and engineering that strives to create truly autonomous learning machines able to learn under non-stationarity.}
}
@inproceedings{cifar10C,
title = {Benchmarking Neural Network Robustness to Common Corruptions and Perturbations},
author = {Dan Hendrycks and Thomas Dietterich},
booktitle = {International Conference on Learning Representations},
year = {2019},
url = {https://openreview.net/forum?id=HJz6tiCqYm},
}
@article{krizhevsky2014cifar,
title = {The CIFAR-10 dataset},
author = {Krizhevsky, Alex and Nair, Vinod and Hinton, Geoffrey},
journal = {online: http://www.cs.toronto.edu/~kriz/cifar.html},
volume = {55},
number = {5},
year = {2014}
}
@misc{misc_heart_disease_45,
author = {Janosi, Andras and Steinbrunn, William and Pfisterer, Matthias and Detrano, Robert},
title = {{Heart Disease}},
year = {1988},
howpublished = {UCI Machine Learning Repository}
}
@inproceedings{ODIN,
title = {Enhancing The Reliability of Out-of-distribution Image Detection in Neural Networks},
author = {Shiyu Liang and Yixuan Li and R. Srikant},
booktitle = {International Conference on Learning Representations},
year = {2018},
url = {https://openreview.net/forum?id=H1VGkIxRZ},
}
@article{noisetolerantlearn,
author = {Avrim Blum and
Adam Kalai and
Hal Wasserman},
title = {Noise-Tolerant Learning, the Parity Problem, and the Statistical Query
Model},
journal = {CoRR},
volume = {cs.LG/0010022},
year = {2000},
url = {https://arxiv.org/abs/cs/0010022},
timestamp = {Fri, 10 Jan 2020 12:57:59 +0100},
biburl = {https://dblp.org/rec/journals/corr/cs-LG-0010022.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{bnnoriginal,
author = {Tishby, Naftali and Levin, Esther and Solla, Sara A.},
booktitle = {International 1989 Joint Conference on Neural Networks},
title = {Consistent inference of probabilities in layered networks: predictions and generalizations},
year = {1989},
pages = {403--409 vol.2},
doi = {10.1109/IJCNN.1989.118274}
}
@article{10.1001/jamainternmed.2021.3333,
author = {Habib, Anand R. and Lin, Anthony L. and Grant, Richard W.},
journal = {JAMA Internal Medicine},
month = {08},
number = {8},
pages = {1040-1041},
title = {{The Epic Sepsis Model Falls Short---The Importance of External Validation}},
volume = {181},
year = {2021}
}
@article{ben2006analysis,
title = {Analysis of representations for domain adaptation},
author = {Ben-David, Shai and Blitzer, John and Crammer, Koby and Pereira, Fernando},
journal = {Advances in neural information processing systems},
volume = {19},
year = {2006}
}
@inproceedings{mindperfgap,
title = {Mind the performance gap: examining dataset shift during prospective validation},
author = {Otles, Erkin and Oh, Jeeheh and Li, Benjamin and Bochinski, Michelle and Joo, Hyeon and Ortwine, Justin and Shenoy, Erica and Washer, Laraine and Young, Vincent B and Rao, Krishna and others},
booktitle = {Machine Learning for Healthcare Conference},
pages = {506--534},
year = {2021},
organization = {PMLR}
}
@article{densityratio,
title = {Likelihood ratios for out-of-distribution detection},
author = {Ren, Jie and Liu, Peter J and Fertig, Emily and Snoek, Jasper and Poplin, Ryan and Depristo, Mark and Dillon, Joshua and Lakshminarayanan, Balaji},
journal = {Advances in neural information processing systems},
volume = {32},
year = {2019}
}
@article{kdeood,
author = {Ertunc Erdil and
Krishna Chaitanya and
Ender Konukoglu},
title = {Unsupervised out-of-distribution detection using kernel density estimation},
journal = {CoRR},
volume = {abs/2006.10712},
year = {2020},
url = {https://arxiv.org/abs/2006.10712},
eprinttype = {arXiv},
eprint = {2006.10712},
timestamp = {Sat, 23 Jan 2021 01:20:56 +0100},
biburl = {https://dblp.org/rec/journals/corr/abs-2006-10712.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@InProceedings{densityofstates,
title = {Density of States Estimation for Out of Distribution Detection},
author = {Morningstar, Warren and Ham, Cusuh and Gallagher, Andrew and Lakshminarayanan, Balaji and Alemi, Alex and Dillon, Joshua},
booktitle = {Proceedings of The 24th International Conference on Artificial Intelligence and Statistics},
pages = {3232--3240},
year = {2021},
editor = {Banerjee, Arindam and Fukumizu, Kenji},
volume = {130},
series = {Proceedings of Machine Learning Research},
month = {13--15 Apr},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v130/morningstar21a/morningstar21a.pdf},
url = {https://proceedings.mlr.press/v130/morningstar21a.html},
abstract = { Perhaps surprisingly, recent studies have shown probabilistic model likelihoods have poor specificity for out-of-distribution (OOD) detection and often assign higher likelihoods to OOD data than in-distribution data. To ameliorate this issue we propose DoSE, the density of states estimator. Drawing on the statistical physics notion of “density of states,” the DoSE decision rule avoids direct comparison of model probabilities, and instead utilizes the “probability of the model probability,” or indeed the frequency of any reasonable statistic. The frequency is calculated using nonparametric density estimators (e.g., KDE and one-class SVM) which measure the typicality of various model statistics given the training data and from which we can flag test points with low typicality as anomalous. Unlike many other methods, DoSE requires neither labeled data nor OOD examples. DoSE is modular and can be trivially applied to any existing, trained model. We demonstrate DoSE’s state-of-the-art performance against other unsupervised OOD detectors on previously established “hard” benchmarks. }
}
@misc{AIissend45:online,
author = {Karen Hao},
title = {AI is sending people to jail—and getting it wrong},
journal = {MIT Technology Review},
howpublished = {\url{https://www.technologyreview.com/2019/01/21/137783/algorithms-criminal-justice-ai/}},
year = {2019},
note = {(Accessed on 05/11/2022)}
}
@misc{Amazonsc17:online,
author = {Jeffrey Dastin},
title = {Amazon scraps secret AI recruiting tool that showed bias against women},
journal = {Reuters},
howpublished = {\url{https://www.reuters.com/article/us-amazon-com-jobs-automation-insight-idUSKCN1MK08G}},
year = {2018},
note = {(Accessed on 05/11/2022)}
}
@misc{selfdrive,
author = {Lauren Smiley},
title = {‘I’m the Operator’: The Aftermath of a Self-Driving Tragedy},
journal = {WIRED},
howpublished = {\url{https://www.wired.com/story/uber-self-driving-car-fatal-crash/}},
year = {2022},
note = {(Accessed on 05/11/2022)}
}
@article{relmahala,
author = {Jie Ren and
Stanislav Fort and
Jeremiah Liu and
Abhijit Guha Roy and
Shreyas Padhy and
Balaji Lakshminarayanan},
title = {A Simple Fix to Mahalanobis Distance for Improving Near-OOD Detection},
journal = {ICML workshop on Uncertainty and Robustness in Deep Learning},
year = {2021},
url = {http://www.gatsby.ucl.ac.uk/~balaji/udl2021/accepted-papers/UDL2021-paper-007.pdf},
eprinttype = {arXiv},
eprint = {2106.09022},
timestamp = {Tue, 29 Jun 2021 16:55:04 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-2106-09022.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{tabdatasurvey,
author = {Vadim Borisov and
Tobias Leemann and
Kathrin Se{\ss}ler and
Johannes Haug and
Martin Pawelczyk and
Gjergji Kasneci},
title = {Deep Neural Networks and Tabular Data: {A} Survey},
journal = {CoRR},
volume = {abs/2110.01889},
year = {2021},
url = {https://arxiv.org/abs/2110.01889},
eprinttype = {arXiv},
eprint = {2110.01889},
timestamp = {Fri, 08 Oct 2021 15:47:55 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-2110-01889.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
% @misc{
% zhao2021hdivergence,
% title={H-divergence: A Decision-Theoretic Probability Discrepancy Measure },
% author={Shengjia Zhao and Abhishek Sinha and Yutong He and Aidan Perreault and Jiaming Song and Stefano Ermon},
% year={2021},
% url={https://openreview.net/forum?id=uBHs6zpY4in}
% }
@inproceedings{zhao2022comparing,
title = {Comparing Distributions by Measuring Differences that Affect Decision Making},
author = {Shengjia Zhao and Abhishek Sinha and Yutong He and Aidan Perreault and Jiaming Song and Stefano Ermon},
booktitle = {International Conference on Learning Representations},
year = {2022},
url = {https://openreview.net/forum?id=KB5onONJIAU}
}
@inproceedings{liu2020learning,
title = {Learning deep kernels for non-parametric two-sample tests},
author = {Liu, Feng and Xu, Wenkai and Lu, Jie and Zhang, Guangquan and Gretton, Arthur and Sutherland, Danica J},
booktitle = {International conference on machine learning},
pages = {6316--6326},
year = {2020},
organization = {PMLR}
}
@article{shimodaira2000improving,
title = {Improving predictive inference under covariate shift by weighting the log-likelihood function},
author = {Shimodaira, Hidetoshi},
journal = {Journal of statistical planning and inference},
volume = {90},
number = {2},
pages = {227--244},
year = {2000},
publisher = {Elsevier}
}
@article{camelyon,
title = {Rotation Equivariant {CNNs} for Digital Pathology},
author = {Veeling, Bastiaan S and Linmans, Jasper and Winkens, Jim and Cohen, Taco and Welling, Max},
month = jun,
year = {2018},
archivePrefix = {arXiv},
primaryClass = {cs.CV},
eprint = {1806.03962}
}
@inproceedings{xgb,
author = {Chen, Tianqi and Guestrin, Carlos},
title = {{XGBoost}: A Scalable Tree Boosting System},
booktitle = {Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining},
series = {KDD '16},
year = {2016},
isbn = {978-1-4503-4232-2},
location = {San Francisco, California, USA},
pages = {785--794},
numpages = {10},
url = {http://doi.acm.org/10.1145/2939672.2939785},
doi = {10.1145/2939672.2939785},
acmid = {2939785},
publisher = {ACM},
address = {New York, NY, USA},
keywords = {large-scale machine learning},
}
@article{deepsets,
title = {Deep sets},
author = {Zaheer, Manzil and Kottur, Satwik and Ravanbakhsh, Siamak and Poczos, Barnabas and Salakhutdinov, Russ R and Smola, Alexander J},
journal = {Advances in neural information processing systems},
volume = {30},
year = {2017}
}
@misc{scipykstest,
title = {scipy.stats.ks\_2samp — SciPy v1.8.1 Manual},
howpublished = {\url{https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ks_2samp.html}},
year = {2022},
note = {(Accessed on 05/24/2022)}
}
@article{kstest,
author = {Hodges, J. L.},
doi = {10.1007/BF02589501},
issn = {1871-2487},
journal = {Arkiv f{\"o}r Matematik},
number = {5},
pages = {469--486},
title = {The significance probability of the {S}mirnov two-sample test},
url = {https://doi.org/10.1007/BF02589501},
volume = {3},
year = {1958}
}
@misc{Barnardp88:online,
author = {Kamil Erguler},
title = {Barnard.pdf},
howpublished = {\url{https://cran.r-project.org/web/packages/Barnard/Barnard.pdf}},
month = {10},
year = {2016},
note = {(Accessed on 05/24/2022)}
}
@inproceedings{torchvision,
author = {Marcel, S\'{e}bastien and Rodriguez, Yann},
title = {Torchvision the Machine-Vision Package of Torch},
year = {2010},
isbn = {9781605589336},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/1873951.1874254},
doi = {10.1145/1873951.1874254},
abstract = {This paper presents Torchvision an open source machine vision package for Torch. Torch is a machine learning library providing a series of the state-of-the-art algorithms such as Neural Networks, Support Vector Machines, Gaussian Mixture Models, Hidden Markov Models and many others. Torchvision provides additional functionalities to manipulate and process images with standard image processing algorithms. Hence, the resulting images can be used directly with the Torch machine learning algorithms as Torchvision is fully integrated with Torch. Both Torch and Torchvision are written in C++ language and are publicly available under the Free-BSD License.},
booktitle = {Proceedings of the 18th ACM International Conference on Multimedia},
pages = {1485--1488},
numpages = {4},
keywords = {face detection and recognition, vision, machine learning, open source, pattern recognition},
location = {Firenze, Italy},
series = {MM '10}
}
@inproceedings{deng2009imagenet,
title = {{ImageNet}: A large-scale hierarchical image database},
author = {Deng, Jia and Dong, Wei and Socher, Richard and Li, Li-Jia and Li, Kai and Fei-Fei, Li},
booktitle = {2009 IEEE conference on computer vision and pattern recognition},
pages = {248--255},
year = {2009},
organization = {IEEE}
}
@inproceedings{DBLP:journals/corr/KingmaB14,
author = {Diederik P. Kingma and
Jimmy Ba},
editor = {Yoshua Bengio and
Yann LeCun},
title = {Adam: {A} Method for Stochastic Optimization},
booktitle = {3rd International Conference on Learning Representations, {ICLR} 2015,
San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings},
year = {2015},
url = {http://arxiv.org/abs/1412.6980},
timestamp = {Thu, 25 Jul 2019 14:25:37 +0200},
biburl = {https://dblp.org/rec/journals/corr/KingmaB14.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{Chaki2015ACO,
title = {A comparison of three discrete methods for classification of heart disease data},
author = {Dipankar Chaki and Arkadeep Das and Moinul Islam Zaber},
journal = {Bangladesh Journal of Scientific and Industrial Research},
year = {2015},
volume = {50},
pages = {293--296}
}
@misc{Mathematica,
author = {Wolfram Research{,} Inc.},
title = {Mathematica, {V}ersion 13.0.0},
url = {https://www.wolfram.com/mathematica},
note = {Champaign, IL, 2021}
}
@misc{Synthesi33:online,
title = {Synthesize Missing Values in Numeric Data: New in Wolfram Language 12},
howpublished = {\url{https://www.wolfram.com/language/12/high-level-machine-learning/synthesize-missing-values-in-numeric-data.html?product=language}},
year = {2019},
note = {(Accessed on 05/25/2022)}
}
@article{10.1162/neco.1989.1.4.541,
author = {LeCun, Y. and Boser, B. and Denker, J. S. and Henderson, D. and Howard, R. E. and Hubbard, W. and Jackel, L. D.},
title = "{Backpropagation Applied to Handwritten Zip Code Recognition}",
journal = {Neural Computation},
volume = {1},
number = {4},
pages = {541-551},
year = {1989},
month = {12},
abstract = "{The ability of learning networks to generalize can be greatly enhanced by providing constraints from the task domain. This paper demonstrates how such constraints can be integrated into a backpropagation network through the architecture of the network. This approach has been successfully applied to the recognition of handwritten zip code digits provided by the U.S. Postal Service. A single network learns the entire recognition operation, going from the normalized image of the character to the final classification.}",
issn = {0899-7667},
doi = {10.1162/neco.1989.1.4.541},
url = {https://doi.org/10.1162/neco.1989.1.4.541},
eprint = {https://direct.mit.edu/neco/article-pdf/1/4/541/811941/neco.1989.1.4.541.pdf},
}
@inproceedings{barnard,
title = {Bootstrap prediction intervals in non-parametric regression with applications to anomaly detection},
author = {Kumar, Sricharan and Srivastava, Ashok N.},
booktitle = {The 18th ACM SIGKDD Conference on Knowledge Discovery and Data Mining},
number = {ARC-E-DAA-TN6188},
year = {2012}
}
@inproceedings{domainrep,
author = {Ben-David, Shai and Blitzer, John and Crammer, Koby and Pereira, Fernando},
booktitle = {Advances in Neural Information Processing Systems},
editor = {B. Sch\"{o}lkopf and J. Platt and T. Hoffman},
publisher = {MIT Press},
title = {Analysis of Representations for Domain Adaptation},
url = {https://proceedings.neurips.cc/paper/2006/file/b1b0432ceafb0ce714426e9114852ac7-Paper.pdf},
volume = {19},
year = {2006}
}
@book{vapnik95,
added-at = {2019-04-26T12:44:37.000+0200},