@inproceedings{ates_counterfactual_2021,
title = {Counterfactual {Explanations} for {Machine} {Learning} on {Multivariate} {Time} {Series} {Data}},
url = {http://arxiv.org/abs/2008.10781},
doi = {10.1109/ICAPAI49758.2021.9462056},
abstract = {Applying machine learning (ML) on multivariate time series data has growing popularity in many application domains, including in computer system management. For example, recent high performance computing (HPC) research proposes a variety of ML frameworks that use system telemetry data in the form of multivariate time series so as to detect performance variations, perform intelligent scheduling or node allocation, and improve system security. Common barriers for adoption for these ML frameworks include the lack of user trust and the difficulty of debugging. These barriers need to be overcome to enable the widespread adoption of ML frameworks in production systems. To address this challenge, this paper proposes a novel explainability technique for providing counterfactual explanations for supervised ML frameworks that use multivariate time series data. The proposed method outperforms state-of-the-art explainability methods on several different ML frameworks and data sets in metrics such as faithfulness and robustness. The paper also demonstrates how the proposed method can be used to debug ML frameworks and gain a better understanding of HPC system telemetry data.},
urldate = {2022-03-25},
booktitle = {2021 International Conference on Applied Artificial Intelligence (ICAPAI)},
author = {Ates, Emre and Aksar, Burak and Leung, Vitus J. and Coskun, Ayse K.},
month = may,
year = {2021},
note = {arXiv: 2008.10781},
keywords = {Computer Science - Machine Learning, Statistics - Machine Learning, Computer Science - Artificial Intelligence},
pages = {1--8},
}
@incollection{sanchez-ruiz_instance-based_2021,
address = {Cham},
title = {Instance-{Based} {Counterfactual} {Explanations} for {Time} {Series} {Classification}},
volume = {12877},
isbn = {978-3-030-86956-4 978-3-030-86957-1},
abstract = {In recent years, there has been a rapidly expanding focus on explaining the predictions made by black-box AI systems that handle image and tabular data. However, considerably less attention has been paid to explaining the predictions of opaque AI systems handling time series data. In this paper, we advance a novel model-agnostic, case-based technique – Native Guide – that generates counterfactual explanations for time series classifiers. Given a query time series, Tq, for which a black-box classification system predicts class, c, a counterfactual time series explanation shows how Tq could change, such that the system predicts an alternative class, c′. The proposed instance-based technique adapts existing counterfactual instances in the case-base by highlighting and modifying discriminative areas of the time series that underlie the classification. Quantitative and qualitative results from two comparative experiments indicate that Native Guide generates plausible, proximal, sparse and diverse explanations that are better than those produced by key benchmark counterfactual methods.},
language = {en},
booktitle = {Case-{Based} {Reasoning} {Research} and {Development}},
publisher = {Springer International Publishing},
author = {Delaney, Eoin and Greene, Derek and Keane, Mark T.},
editor = {Sánchez-Ruiz, Antonio A. and Floyd, Michael W.},
year = {2021},
series = {Lecture Notes in Computer Science},
pages = {32--47},
}
@inproceedings{guilleme_agnostic_2019,
address = {Portland, OR, USA},
title = {Agnostic {Local} {Explanation} for {Time} {Series} {Classification}},
isbn = {978-1-72813-798-8},
booktitle = {2019 {IEEE} 31st {International} {Conference} on {Tools} with {Artificial} {Intelligence} ({ICTAI})},
publisher = {IEEE},
author = {Guillemé, Maël and Masson, Véronique and Rozé, Laurence and Termier, Alexandre},
month = nov,
year = {2019},
pages = {432--439},
}
@article{ismail_benchmarking_2020,
title = {Benchmarking {Deep} {Learning} {Interpretability} in {Time} {Series} {Predictions}},
abstract = {Saliency methods are used extensively to highlight the importance of input features in model predictions. These methods are mostly used in vision and language tasks, and their applications to time series data is relatively unexplored. In this paper, we set out to extensively compare the performance of various saliency-based interpretability methods across diverse neural architectures, including Recurrent Neural Network, Temporal Convolutional Networks, and Transformers in a new benchmark of synthetic time series data. We propose and report multiple metrics to empirically evaluate the performance of saliency methods for detecting feature importance over time using both precision (i.e., whether identified features contain meaningful signals) and recall (i.e., the number of features with signal identified as important). Through several experiments, we show that (i) in general, network architectures and saliency methods fail to reliably and accurately identify feature importance over time in time series data, (ii) this failure is mainly due to the conflation of time and feature domains, and (iii) the quality of saliency maps can be improved substantially by using our proposed two-step temporal saliency rescaling (TSR) approach that first calculates the importance of each time step before calculating the importance of each feature at a time step.},
journal = {arXiv preprint arXiv:2010.13924},
author = {Ismail, Aya Abdelsalam and Gunady, Mohamed and Bravo, Héctor Corrada and Feizi, Soheil},
month = oct,
year = {2020},
note = {arXiv: 2010.13924},
keywords = {Computer Science - Machine Learning, Statistics - Machine Learning},
}
@misc{meudec_raphael_tf-explain_2021,
title = {tf-explain},
copyright = {Open Access},
url = {https://zenodo.org/record/5711704},
abstract = {Interpretability Methods for tf.keras models with TensorFlow 2.x},
urldate = {2022-05-24},
publisher = {Zenodo},
author = {Meudec, Raphael},
month = feb,
year = {2021},
doi = {10.5281/ZENODO.5711704},
}
@article{kokhlikyan2020captum,
title={Captum: A unified and generic model interpretability library for {PyTorch}},
author={Kokhlikyan, Narine and Miglani, Vivek and Martin, Miguel and Wang, Edward and Alsallakh, Bilal and Reynolds, Jonathan and Melnikov, Alexander and Kliushkina, Natalia and Araya, Carlos and Yan, Siqi and others},
journal={arXiv preprint arXiv:2009.07896},
year={2020}
}
@article{gunning2017explainable,
title={Explainable artificial intelligence ({XAI})},
author={Gunning, David},
journal={Defense Advanced Research Projects Agency ({DARPA})},
volume={2},
number={2},
pages={1},
year={2017}
}
@mastersthesis{honegger2018shedding,
title={Shedding light on black box machine learning algorithms: Development of an axiomatic framework to assess the quality of methods that explain individual predictions},
author={Honegger, Milo R},
school={Karlsruhe Institute of Technology},
year={2018}
}
@article{rojat2021explainable,
title={Explainable artificial intelligence ({XAI}) on timeseries data: A survey},
author={Rojat, Thomas and Puget, Rapha{\"e}l and Filliat, David and Del Ser, Javier and Gelin, Rodolphe and D{\'\i}az-Rodr{\'\i}guez, Natalia},
journal={arXiv preprint arXiv:2104.00950},
year={2021}
}
@inproceedings{hollig2022tsevo,
title={{TSEvo}: Evolutionary Counterfactual Explanations for Time Series Classification},
author={H{\"o}llig, Jacqueline and Kulbach, Cedric and Thoma, Steffen},
booktitle={2022 21st IEEE International Conference on Machine Learning and Applications (ICMLA)},
pages={29--36},
year={2022},
organization={IEEE}
}
@article{bahri2022shapelet,
title={Shapelet-based counterfactual explanations for multivariate time series},
author={Bahri, Omar and Boubrahimi, Soukaina Filali and Hamdi, Shah Muhammad},
journal={arXiv preprint arXiv:2208.10462},
year={2022}
}