% Encoding: UTF-8
@Article{Appel2017,
author = {Kristoffer Appel and Gordon Pipa and Martin Dresler},
title = {Investigating consciousness in the sleep laboratory -- an interdisciplinary perspective on lucid dreaming},
journal = {Interdisciplinary Science Reviews},
year = {2017},
month = nov,
doi = {10.1080/03080188.2017.1380468},
publisher = {Informa {UK} Limited},
}
@Article{Barnes2010,
author = {Nick Barnes},
title = {Publish your computer code: it is good enough},
journal = {Nature},
year = {2010},
month = oct,
doi = {10.1038/467753a},
}
@Article{Bekerman2014,
author = {Inessa Bekerman and Paul Gottlieb and Michael Vaiman},
title = {Variations in Eyeball Diameters of the Healthy Adults},
journal = {Journal of Ophthalmology},
year = {2014},
volume = {2014},
month = nov,
doi = {10.1155/2014/503645},
url = {https://www.hindawi.com/journals/joph/2014/503645/},
}
@Misc{Biggs2016,
author = {John Biggs},
title = {{The Eye Tribe Tracker Pro} Offers Affordable Eye Tracking For \$199},
howpublished = {https://techcrunch.com/2016/01/14/the-eye-tribe-tracker-pro-offers-affordable-eye-tracking-for-199/},
month = jan,
year = {2016},
note = {Accessed: 2018-01-26},
url = {https://techcrunch.com/2016/01/14/the-eye-tribe-tracker-pro-offers-affordable-eye-tracking-for-199/},
}
@Article{Bradski2000,
author = {Gary Bradski},
title = {{The OpenCV Library}},
journal = {{Dr. Dobb's Journal of Software Tools}},
year = {2000},
month = dec,
url = {https://opencv.org},
}
@Misc{CC0License,
author = {{Creative Commons}},
title = {{CC0 1.0 Universal}},
howpublished = {https://creativecommons.org/publicdomain/zero/1.0/legalcode},
month = jan,
year = {2018},
note = {Accessed: 2018-01-13},
url = {https://creativecommons.org/publicdomain/zero/1.0/legalcode},
}
@Article{Chennamma2013,
author = {H. R. Chennamma and Xiaohui Yuan},
title = {A Survey on Eye-Gaze Tracking Techniques},
journal = {arXiv},
year = {2013},
note = {arXiv:1312.6410v1},
abstract = {Study of eye-movement is being employed in Human Computer Interaction (HCI) research. Eye-gaze tracking is one of the most challenging problems in the area of computer vision. The goal of this paper is to present a review of latest research in this continued growth of remote eye-gaze tracking. This overview includes the basic definitions and terminologies, recent advances in the field and finally the need of future development in the field.},
file = {online:http\://arxiv.org/pdf/1312.6410v1:PDF},
keywords = {cs.CV},
}
@Misc{Constine2016,
author = {Josh Constine},
title = {Oculus acquires eye-tracking startup {The Eye Tribe}},
howpublished = {https://techcrunch.com/2016/12/28/the-eye-tribe-oculus/},
month = dec,
year = {2016},
note = {Accessed: 2018-01-26},
url = {https://techcrunch.com/2016/12/28/the-eye-tribe-oculus/},
}
@Misc{Crisp2013,
author = {Simon Crisp},
title = {Camera sensor size: Why does it matter and exactly how big are they?},
howpublished = {https://newatlas.com/camera-sensor-size-guide/26684/},
month = mar,
year = {2013},
note = {Accessed: 2018-01-28},
url = {https://newatlas.com/camera-sensor-size-guide/26684/},
}
@Article{Dalmaijer2014,
author = {Edwin Dalmaijer and Sebastiaan Math\^ot and Stefan {van der Stigchel}},
title = {{PyGaze}: an open-source, cross-platform toolbox for minimal-effort programming of eye tracking experiments},
journal = {Behavior Research Methods},
year = {2014},
volume = {46},
number = {4},
pages = {913--921},
month = dec,
abstract = {The PyGaze toolbox is an open-source software package for Python, a high-level programming language. It is designed for creating eyetracking experiments in Python syntax with the least possible effort, and it offers programming ease and script readability without constraining functionality and flexibility. PyGaze can be used for visual and auditory stimulus presentation; for response collection via keyboard, mouse, joystick, and other external hardware; and for the online detection of eye movements using a custom algorithm. A wide range of eyetrackers of different brands (EyeLink, SMI, and Tobii systems) are supported. The novelty of PyGaze lies in providing an easy-to-use layer on top of the many different software libraries that are required for implementing eyetracking experiments. Essentially, PyGaze is a software bridge for eyetracking research.},
doi = {10.3758/s13428-013-0422-2},
keywords = {Eyetracking, Open-source, Software, Python, PsychoPy, Gaze contingency, read},
url = {https://link.springer.com/article/10.3758%2Fs13428-013-0422-2},
}
@Misc{Dalmaijer2015,
author = {Edwin Dalmaijer},
title = {Webcam Eye Tracker},
howpublished = {http://www.pygaze.org/2015/06/webcam-eye-tracker/},
month = jun,
year = {2015},
note = {Accessed: 2018-01-26},
url = {http://www.pygaze.org/2015/06/webcam-eye-tracker/},
}
@InCollection{Davson2017,
author = {Hugh Davson},
title = {Human Eye},
booktitle = {Encyclopædia Britannica},
publisher = {Encyclopædia Britannica, Inc.},
year = {2017},
month = sep,
note = {Accessed: 2017-12-11},
abstract = {Human eye, in humans, specialized sense organ capable of receiving visual images, which are then carried to the brain.},
url = {https://www.britannica.com/science/human-eye},
}
@Misc{Driessen2010,
author = {Vincent Driessen},
title = {A successful Git branching model},
howpublished = {http://nvie.com/posts/a-successful-git-branching-model/},
month = jan,
year = {2010},
note = {Accessed: 2018-01-02},
url = {http://nvie.com/posts/a-successful-git-branching-model/},
}
@InCollection{Duc2008,
author = {Albert Hoang Duc and Paul Bays and Masud Husain},
title = {Eye movements as a probe of attention},
booktitle = {{Progress in Brain Research}},
publisher = {Elsevier},
year = {2008},
pages = {403--411},
doi = {10.1016/s0079-6123(08)00659-6},
}
@Article{Duchowski2002,
author = {Andrew T. Duchowski},
title = {A breadth-first survey of eye-tracking applications},
journal = {Behavior Research Methods, Instruments, {\&} Computers},
year = {2002},
volume = {34},
number = {4},
pages = {455--470},
month = nov,
doi = {10.3758/bf03195475},
publisher = {Springer Nature},
}
@Book{Duchowski2007,
title = {Eye Tracking Methodology},
publisher = {Springer},
year = {2007},
author = {Andrew Duchowski},
edition = {Second},
isbn = {978-1-84628-608-7},
url = {http://www.ebook.de/de/product/8900341/andrew_duchowski_andrew_duchowski_eye_tracking_methodology.html},
}
@Misc{Facebase,
author = {Seth M. Weinberg and Mary L. Marazita},
title = {3D Facial Norms Database},
howpublished = {https://www.facebase.org/facial_norms},
month = nov,
year = {2009},
note = {Accessed: 2018-01-05. NIDCR Grant: 1-U01-DE020078},
abstract = {Although ample evidence exists that facial appearance and structure are highly heritable, there is a dearth of information regarding how variation in specific genes relates to the diversity of facial forms evident in our species. With the advent of affordable, non-invasive 3D surface imaging technology, it is now possible to capture detailed quantitative information about the face in a large number of individuals. By coupling state-of-the-art 3D imaging with advances in high-throughput genotyping, an unparalleled opportunity exists to map the genetic determinants of normal facial variation. An improved understanding of the relationship between genotype and facial phenotype may help illuminate the factors influencing liability to common craniofacial anomalies, particularly orofacial clefts, which are among the most prevalent birth defects in humans.},
url = {https://www.facebase.org/facial_norms},
}
@Article{Fan2016,
author = {Haoqiang Fan and Erjin Zhou},
title = {Approaching human level facial landmark localization by deep learning},
journal = {Image and Vision Computing},
year = {2016},
volume = {47},
pages = {27--35},
month = mar,
doi = {10.1016/j.imavis.2015.11.004},
publisher = {Elsevier {BV}},
}
@MastersThesis{Ferhat2012,
author = {Onur Ferhat},
title = {Eye-Tracking with Webcam-Based Setups: Implementation of a Real-Time System and an Analysis of Factors Affecting Performance},
school = {Universitat Autònoma de Barcelona},
year = {2012},
url = {http://refbase.cvc.uab.es/files/Fer2012.pdf},
}
@InProceedings{Filho2010,
author = {H\'elio Perroni Filho and Alberto Ferreira De Souza},
title = {{VG-RAM WNN} approach to monocular depth perception},
booktitle = {Neural Information Processing. Models and Applications},
year = {2010},
editor = {Kok Wai Wong and Balapuwaduge Sumudu Udaya Mendis and Abdesselam Bouzerdoum},
volume = {6444},
series = {Lecture Notes in Computer Science},
pages = {509--516},
address = {Berlin/Heidelberg, Germany},
month = nov,
publisher = {Springer},
abstract = {We have examined Virtual Generalizing Random Access Memory Weightless Neural Networks (VG-RAM WNN) as platform for depth map inference from static monocular images. For that, we have designed, implemented and compared the performance of VG-RAM WNN systems against that of depth estimation systems based on Markov Random Field (MRF) models. While not surpassing the performance of such systems, our results are consistent to theirs, and allow us to infer important features of the human visual cortex.},
doi = {10.1007/978-3-642-17534-3_63},
keywords = {Monocular depth perception, weightless neural networks},
url = {https://www.researchgate.net/publication/220000031_VG-RAM_WNN_approach_to_monocular_depth_perception},
}
@InProceedings{Fini2011,
author = {Mohammad Reza Ramezanpour Fini and Mohammad Ali Azimi Kashani and Mohammad Rahmati},
title = {Eye detection and tracking in image with complex background},
booktitle = {{3rd International Conference on Electronics Computer Technology}},
year = {2011},
month = apr,
publisher = {{IEEE}},
doi = {10.1109/icectech.2011.5942050},
}
@Misc{Frischholz2018,
author = {Robert W. Frischholz},
title = {Face Detection \& Recognition Homepage},
howpublished = {https://facedetection.com/software/},
month = jan,
year = {2018},
note = {Accessed: 2018-01-14},
url = {https://facedetection.com/software/},
}
@Article{George2016,
author = {Anjith George and Aurobinda Routray},
title = {Fast and accurate algorithm for eye localisation for gaze tracking in low-resolution images},
journal = {{IET} Computer Vision},
year = {2016},
volume = {10},
number = {7},
pages = {660--669},
month = oct,
doi = {10.1049/iet-cvi.2015.0316},
publisher = {Institution of Engineering and Technology ({IET})},
}
@InBook{Gross2008,
chapter = {Human Eye},
pages = {1--87},
title = {Survey of Optical Instruments},
publisher = {Wiley},
year = {2008},
author = {Herbert Gross and Fritz Blechinger and Bertram Achtner},
editor = {Herbert Gross},
volume = {4},
series = {Handbook of Optical Systems},
month = mar,
isbn = {978-3-527-40380-6},
url = {https://application.wiley-vch.de/books/sample/3527403809_c01.pdf},
}
@Article{Hansen2005,
author = {Dan Witzner Hansen and Arthur E.C. Pece},
title = {Eye tracking in the wild},
journal = {Computer Vision and Image Understanding},
year = {2005},
volume = {98},
number = {1},
pages = {155--181},
month = apr,
doi = {10.1016/j.cviu.2004.07.013},
publisher = {Elsevier {BV}},
}
@Article{Hansen2010,
author = {Dan Witzner Hansen and Qiang Ji},
title = {In the Eye of the Beholder: A Survey of Models for Eyes and Gaze},
journal = {{IEEE} Transactions on Pattern Analysis and Machine Intelligence},
year = {2010},
volume = {32},
number = {3},
pages = {478--500},
month = feb,
issn = {0162-8828},
doi = {10.1109/TPAMI.2009.30},
publisher = {Institute of Electrical and Electronics Engineers ({IEEE})},
}
@Book{Helmholtz1866,
title = {{Handbuch der physiologischen Optik} [{H}andbook of physiological optics]},
publisher = {Gustav Karsten},
year = {1866},
author = {Hermann Helmholtz},
editor = {P. W. Brix and G. Decher and F. C. O. von Feilitzsch and F. Grashof and F. Harms and H. Helmholtz and G. Karsten and H. Karsten and C. Kuhn and J. Lamont and J. Pfeffer and E. E. Schmid and F. Schulz and L. Seidel and G. Weyer and W. Wundt},
volume = {IX},
series = {Allgemeine Encyklopädie der Physik},
url = {https://archive.org/details/handbuchderphysi00helm},
}
@Book{Huey1908,
title = {The psychology and pedagogy of reading},
publisher = {The Macmillan Company},
year = {1908},
author = {Edmund Burke Huey},
month = jan,
url = {https://archive.org/details/psychologyandpe00hueygoog},
}
@Misc{Hume2012,
author = {Tristan Hume},
title = {Simple, accurate eye center tracking in {OpenCV}},
howpublished = {http://thume.ca/projects/2012/11/04/simple-accurate-eye-center-tracking-in-opencv/},
month = nov,
year = {2012},
note = {Accessed: 2018-01-11},
url = {http://thume.ca/projects/2012/11/04/simple-accurate-eye-center-tracking-in-opencv/},
}
@InProceedings{Jesorsky2001,
author = {Oliver Jesorsky and Klaus J. Kirchberg and Robert W. Frischholz},
title = {Robust Face Detection Using the {Hausdorff} Distance},
booktitle = {{Third International Conference on Audio- and Video-based Biometric Person Authentication}},
year = {2001},
series = {Lecture Notes in Computer Science},
pages = {90--95},
publisher = {Springer},
doi = {10.1007/3-540-45344-X_14},
}
@Article{Jia2014,
author = {Yangqing Jia and Evan Shelhamer and Jeff Donahue and Sergey Karayev and Jonathan Long and Ross Girshick and Sergio Guadarrama and Trevor Darrell},
title = {Caffe: Convolutional Architecture for Fast Feature Embedding},
journal = {arXiv},
year = {2014},
note = {arXiv:1408.5093},
abstract = {Caffe provides multimedia scientists and practitioners with a clean and modifiable framework for state-of-the-art deep learning algorithms and a collection of reference models. The framework is a BSD-licensed C++ library with Python and MATLAB bindings for training and deploying general-purpose convolutional neural networks and other deep models efficiently on commodity architectures. Caffe fits industry and internet-scale media needs by CUDA GPU computation, processing over 40 million images a day on a single K40 or Titan GPU ($\approx$ 2.5 ms per image). By separating model representation from actual implementation, Caffe allows experimentation and seamless switching among platforms for ease of development and deployment from prototyping machines to cloud environments. Caffe is maintained and developed by the Berkeley Vision and Learning Center (BVLC) with the help of an active community of contributors on GitHub. It powers ongoing research projects, large-scale industrial applications, and startup prototypes in vision, speech, and multimedia.},
file = {online:http\://arxiv.org/pdf/1408.5093v1:PDF},
}
@InProceedings{Judd2009,
author = {Tilke Judd and Krista Ehinger and Fr\'edo Durand and Antonio Torralba},
title = {{Learning to Predict Where Humans Look}},
booktitle = {{12th International Conference on Computer Vision}},
year = {2009},
pages = {2106--2113},
month = sep,
publisher = {{IEEE}},
abstract = {For many applications in graphics, design, and human computer interaction, it is essential to understand where humans look in a scene. Where eye tracking devices are not a viable option, models of saliency can be used to predict fixation locations. Most saliency approaches are based on bottom-up computation that does not consider top-down image semantics and often does not match actual eye movements. To address this problem, we collected eye tracking data of 15 viewers on 1003 images and use this database as training and testing examples to learn a model of saliency based on low, middle and high-level image features. This large database of eye tracking data is publicly available with this paper.},
doi = {10.1109/ICCV.2009.5459462},
url = {http://people.csail.mit.edu/tjudd/WherePeopleLook/},
}
@Article{Jun2016,
author = {Dongwook Jun and Jong Man Lee and Su Yeong Gwon and Weiyuan Pan and Hyeon Chang Lee and Kang Ryoung Park and Hyun-Cheol Kim},
title = {Compensation Method of Natural Head Movement for Gaze Tracking System Using an Ultrasonic Sensor for Distance Measurement},
journal = {Sensors},
year = {2016},
volume = {16},
number = {1},
pages = {110},
month = jan,
abstract = {Most gaze tracking systems are based on the pupil center corneal reflection (PCCR) method using near infrared (NIR) illuminators. One advantage of the PCCR method is the high accuracy it achieves in gaze tracking because it compensates for the pupil center position based on the relative position of corneal specular reflection (SR). However, the PCCR method only works for user head movements within a limited range, and its performance is degraded by the natural movement of the user's head. To overcome this problem, we propose a gaze tracking method using an ultrasonic sensor that is robust to the natural head movement of users. Experimental results demonstrate that with our compensation method the gaze tracking system is more robust to natural head movements compared to other systems without our method and commercial systems.},
doi = {10.3390/s16010110},
keywords = {gaze tracking system, compensation of head movements, ultrasonic sensor, natural head movement},
url = {http://www.mdpi.com/1424-8220/16/1/110},
}
@Article{King2009,
author = {Davis E. King},
title = {{Dlib-ml}: A Machine Learning Toolkit},
journal = {Journal of Machine Learning Research},
year = {2009},
volume = {10},
pages = {1755--1758},
month = jul,
note = {\url{http://dlib.net/}},
abstract = {There are many excellent toolkits which provide support for developing machine learning software in Python, R, Matlab, and similar environments. Dlib-ml is an open source library, targeted at both engineers and research scientists, which aims to provide a similarly rich environment for developing machine learning software in the C++ language. Towards this end, dlib-ml contains an extensible linear algebra toolkit with built in BLAS support. It also houses implementations of algorithms for performing inference in Bayesian networks and kernel-based methods for classification, regression, clustering, anomaly detection, and feature ranking. To enable easy use of these tools, the entire library has been developed with contract programming, which provides complete and precise documentation as well as powerful debugging tools.},
doi = {10.1145/1577069.1755843},
keywords = {kernel-methods, svm, rvm, kernel clustering, C++, Bayesian networks},
url = {http://jmlr.csail.mit.edu/papers/volume10/king09a/king09a.pdf},
}
@Misc{King2014,
author = {Davis E. King},
title = {Dlib 18.6 released: Make your own object detector!},
howpublished = {http://blog.dlib.net/2014/02/dlib-186-released-make-your-own-object.html},
month = feb,
year = {2014},
note = {Accessed: 2018-01-14},
url = {http://blog.dlib.net/2014/02/dlib-186-released-make-your-own-object.html},
}
@Article{King2015,
author = {Davis E. King},
title = {{Max-Margin Object Detection}},
journal = {arXiv},
year = {2015},
note = {arXiv:1502.00046},
}
@InProceedings{Krafka2016,
author = {Kyle Krafka and Aditya Khosla and Petr Kellnhofer and Harini Kannan and Suchendra Bhandarkar and Wojciech Matusik and Antonio Torralba},
title = {Eye Tracking for Everyone},
booktitle = {{IEEE Conference on Computer Vision and Pattern Recognition}},
year = {2016},
pages = {2176--2184},
publisher = {{IEEE}},
note = {\url{http://gazecapture.csail.mit.edu/index.php}},
abstract = {From scientific research to commercial applications, eye tracking is an important tool across many domains. Despite its range of applications, eye tracking has yet to become a pervasive technology. We believe that we can put the power of eye tracking in everyone's palm by building eye tracking software that works on commodity hardware such as mobile phones and tablets, without the need for additional sensors or devices. We tackle this problem by introducing GazeCapture, the first large-scale dataset for eye tracking, containing data from over 1450 people consisting of almost 2.5M frames. Using GazeCapture, we train iTracker, a convolutional neural network for eye tracking, which achieves a significant reduction in error over previous approaches while running in real time (10-15fps) on a modern mobile device. Our model achieves a prediction error of 1.71cm and 2.53cm without calibration on mobile phones and tablets respectively. With calibration, this is reduced to 1.34cm and 2.12cm. Further, we demonstrate that the features learned by iTracker generalize well to other datasets, achieving state-of-the-art results. The code, data, and models are available at http://gazecapture.csail.mit.edu.},
doi = {10.1109/CVPR.2016.239},
url = {http://gazecapture.csail.mit.edu/index.php},
}
@InProceedings{Lemaignan2016,
author = {Séverin Lemaignan and Fernando Garcia and Alexis Jacq and Pierre Dillenbourg},
title = {From real-time attention assessment to ``with-me-ness'' in human-robot interaction},
booktitle = {{11th {ACM}/{IEEE} International Conference on Human-Robot Interaction}},
year = {2016},
month = mar,
publisher = {{IEEE}},
doi = {10.1109/hri.2016.7451747},
}
@Article{Lepetit2009,
author = {Vincent Lepetit and Francesc Moreno-Noguer and Pascal Fua},
title = {{EP\emph{n}P}: An Accurate $O(n)$ Solution to the {P\emph{n}P} Problem},
journal = {International Journal of Computer Vision},
year = {2009},
volume = {81},
number = {2},
pages = {155--166},
doi = {10.1007/s11263-008-0152-6},
}
@Article{Levenberg1944,
author = {Kenneth Levenberg},
title = {A method for the solution of certain non-linear problems in least squares},
journal = {Quarterly of Applied Mathematics},
year = {1944},
volume = {2},
number = {2},
pages = {164--168},
month = jul,
doi = {10.1090/qam/10666},
}
@InProceedings{Li2005,
author = {Dongheng Li and David Winfield and Derrick J. Parkhurst},
title = {Starburst: A hybrid algorithm for video-based eye tracking combining feature-based and model-based approaches},
booktitle = {{IEEE Conference on Computer Vision and Pattern Recognition}},
year = {2005},
publisher = {{IEEE}},
doi = {10.1109/cvpr.2005.531},
}
@Misc{Luepke2005,
author = {Lara Luepke},
title = {{Apple iSight review}},
howpublished = {https://www.cnet.com/products/apple-isight/review/},
month = aug,
year = {2005},
note = {Accessed: 2018-01-18},
url = {https://www.cnet.com/products/apple-isight/review/},
}
@Misc{Mahler2017,
author = {Philip Mahler},
title = {Eye Tracker Prices – An Overview of 15+ Eye Trackers},
howpublished = {https://imotions.com/blog/eye-tracker-prices/},
month = jun,
year = {2017},
note = {Accessed: 2018-01-26},
url = {https://imotions.com/blog/eye-tracker-prices/},
}
@Misc{Mallick2016,
author = {Satya Mallick},
title = {Head Pose Estimation using {OpenCV} and {Dlib}},
howpublished = {https://www.learnopencv.com/head-pose-estimation-using-opencv-and-dlib},
month = sep,
year = {2016},
note = {Accessed: 2018-01-10},
url = {https://www.learnopencv.com/head-pose-estimation-using-opencv-and-dlib/},
}
@Article{Marquardt1963,
author = {Donald W. Marquardt},
title = {An Algorithm for Least-Squares Estimation of Nonlinear Parameters},
journal = {Journal of the Society for Industrial and Applied Mathematics},
year = {1963},
volume = {11},
number = {2},
pages = {431--441},
doi = {10.1137/0111030},
}
@Misc{MITLicense,
author = {{Open Source Initiative}},
title = {The {MIT} License},
howpublished = {https://opensource.org/licenses/MIT},
month = jan,
year = {2018},
note = {Accessed: 2018-01-01},
url = {https://opensource.org/licenses/MIT},
}
@InProceedings{Nagamatsu2009,
author = {Takashi Nagamatsu and Junzo Kamahara and Naoki Tanaka},
title = {Calibration-free gaze tracking using a binocular 3D eye model},
booktitle = {{Extended Abstracts on Human Factors in Computing Systems}},
year = {2009},
publisher = {{ACM} Press},
doi = {10.1145/1520340.1520543},
}
@InProceedings{Newman2000,
author = {R. Newman and Y. Matsumoto and S. Rougeaux and A. Zelinsky},
title = {Real-time stereo tracking for head pose and gaze estimation},
booktitle = {{4th International Conference on Automatic Face and Gesture Recognition}},
year = {2000},
publisher = {{IEEE}},
doi = {10.1109/afgr.2000.840622},
}
@Article{Noureddin2012,
author = {B. Noureddin and P. D. Lawrence and G. E. Birch},
title = {Online Removal of Eye Movement and Blink {EEG} Artifacts Using a High-Speed Eye Tracker},
journal = {{IEEE} Transactions on Biomedical Engineering},
year = {2012},
volume = {59},
number = {8},
pages = {2103--2110},
month = aug,
doi = {10.1109/tbme.2011.2108295},
publisher = {Institute of Electrical and Electronics Engineers ({IEEE})},
}
@InProceedings{Ohno2004,
author = {Takehiko Ohno and Naoki Mukawa},
title = {A free-head, simple calibration, gaze tracking system that enables gaze-based interaction},
booktitle = {Symposium on Eye Tracking Research {\&} Applications},
year = {2004},
publisher = {{ACM} Press},
doi = {10.1145/968363.968387},
}
@InProceedings{Papoutsaki2016,
author = {Alexandra Papoutsaki and Patsorn Sangkloy and James Laskey and Nediyana Daskalova and Jeff Huang and James Hays},
title = {{WebGazer}: Scalable Webcam Eye Tracking Using User Interactions},
booktitle = {{25th International Joint Conference on Artificial Intelligence}},
year = {2016},
pages = {3839--3845},
organization = {AAAI},
url = {http://webgazer.cs.brown.edu},
}
@InCollection{Park2002,
author = {Kang Ryoung Park and Jeong Jun Lee and Jaihie Kim},
title = {Facial and Eye Gaze Detection},
booktitle = {{Biologically Motivated Computer Vision}},
publisher = {Springer},
year = {2002},
pages = {368--376},
doi = {10.1007/3-540-36181-2_37},
}
@Article{Patacchiola2017,
author = {Massimiliano Patacchiola and Angelo Cangelosi},
title = {Head pose estimation in the wild using Convolutional Neural Networks and adaptive gradient methods},
journal = {Pattern Recognition},
year = {2017},
volume = {71},
pages = {132--143},
month = nov,
doi = {10.1016/j.patcog.2017.06.009},
publisher = {Elsevier},
}
@InProceedings{Patney2016,
author = {Anjul Patney and Joohwan Kim and Marco Salvi and Anton Kaplanyan and Chris Wyman and Nir Benty and Aaron Lefohn and David Luebke},
title = {Perceptually-based foveated virtual reality},
booktitle = {{SIGGRAPH}},
year = {2016},
publisher = {{ACM}},
doi = {10.1145/2929464.2929472},
}
@InProceedings{PaulaVeronese2012,
author = {Lucas de Paula Veronese and Lauro Jos\'e Lyrio Junior and Filipe Wall Mutz and Jorcy de Oliveira Neto and Vitor Barbirato Azevedo and Mariella Berger and Alberto Ferreira De Souza and Claudine Badue},
title = {Stereo matching with {VG-RAM} Weightless Neural Networks},
booktitle = {{12th International Conference on Intelligent Systems Design and Applications}},
year = {2012},
editor = {Ajith Abraham and Albert Zomaya and Sebastian Ventura and Ronald Yager and Vaclav Snasel and Azah Kamilah Muda and Philip Samuel},
pages = {309--314},
address = {Kochi, India},
month = nov,
publisher = {IEEE},
abstract = {Virtual Generalizing Random Access Memory Weightless Neural Networks (VG-RAM WNN) is an effective machine learning technique that offers simple implementation and fast training and test. We examined the performance of VG-RAM WNN on binocular dense stereo matching using the Middlebury Stereo Datasets. Our experimental results showed that, even without tackling occlusions and discontinuities in the stereo image pairs examined, our VG-RAM WNN architecture for stereo matching was able to rank at 114th position in the Middlebury Stereo Evaluation system. This result is promising, because the difference in performance among approaches ranked in distinct positions is very small.},
doi = {10.1109/ISDA.2012.6416556},
keywords = {Binocular Dense Stereo Matching, VG-RAM Weightless Neural Networks, Middlebury Stereo Vision Page},
url = {https://www.researchgate.net/publication/261468052_Stereo_matching_with_VG-RAM_Weightless_Neural_Networks},
}
@MastersThesis{Periketi2011,
author = {Prashanth Rao Periketi},
title = {Gaze estimation using sclera and iris extraction},
school = {University of Kentucky},
year = {2011},
url = {http://www.vis.uky.edu/~cheung/doc/thesis-Prashanth.pdf},
}
@Misc{Rossignol2017,
author = {Joe Rossignol},
title = {Apple Acquires German Eye Tracking Firm {SensoMotoric Instruments}},
howpublished = {https://www.macrumors.com/2017/06/26/apple-acquires-sensomotoric-instruments/},
month = jun,
year = {2017},
note = {Accessed: 2018-01-27},
url = {https://www.macrumors.com/2017/06/26/apple-acquires-sensomotoric-instruments/},
}
@InProceedings{Roussos2012,
author = {Anastasios Roussos and Chris Russell and Ravi Garg and Lourdes Agapito},
title = {{Dense Multibody Motion Estimation and Reconstruction from a Handheld Camera}},
booktitle = {{IEEE International Symposium on Mixed and Augmented Reality}},
year = {2012},
pages = {31--40},
address = {Atlanta, GA, USA},
month = nov,
publisher = {{IEEE}},
abstract = {Existing approaches to camera tracking and reconstruction from a single handheld camera for Augmented Reality (AR) focus on the reconstruction of static scenes. However, most real world scenarios are dynamic and contain multiple independently moving rigid objects. This paper addresses the problem of simultaneous segmentation, motion estimation and dense 3D reconstruction of dynamic scenes. We propose a dense solution to all three elements of this problem: depth estimation, motion label assignment and rigid transformation estimation directly from the raw video by optimizing a single cost function using a hill-climbing approach. We do not require prior knowledge of the number of objects present in the scene -- the number of independent motion models and their parameters are automatically estimated. The resulting inference method combines the best techniques in discrete and continuous optimization: a state of the art variational approach is used to estimate the dense depth maps while the motion segmentation is achieved using discrete graph-cut based optimization. For the rigid motion estimation of the independently moving objects we propose a novel tracking approach designed to cope with the small fields of view they induce and agile motion. Our experimental results on real sequences show how accurate segmentations and dense depth maps can be obtained in a completely automated way and used in marker-free AR applications.},
doi = {10.1109/ISMAR.2012.6402535},
keywords = {Cameras, Optimization, Motion segmentation, Image reconstruction, Tracking, Estimation, Motion estimation},
url = {https://www.researchgate.net/publication/233981640_Dense_Multibody_Motion_Estimation_and_Reconstruction_from_a_Handheld_Camera},
}
@InProceedings{Sagonas2013,
author = {Christos Sagonas and Georgios Tzimiropoulos and Stefanos Zafeiriou and Maja Pantic},
title = {300 {Faces in-the-Wild Challenge}: The first facial landmark localization Challenge},
booktitle = {{IEEE International Conference on Computer Vision}},
year = {2013},
publisher = {{IEEE}},
abstract = {Automatic facial point detection plays arguably the most important role in face analysis. Several methods have been proposed which reported their results on databases of both constrained and unconstrained conditions. Most of these databases provide annotations with different mark-ups and in some cases there are problems related to the accuracy of the fiducial points. The aforementioned issues as well as the lack of an evaluation protocol makes it difficult to compare performance between different systems. In this paper, we present the 300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge which is held in conjunction with the International Conference on Computer Vision 2013, Sydney, Australia. The main goal of this challenge is to compare the performance of different methods on a new-collected dataset using the same evaluation protocol and the same mark-up and hence to develop the first standardized benchmark for facial landmark localization.},
url = {https://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_iccv_2013_300_w.pdf},
}
@Article{Sagonas2016,
author = {Christos Sagonas and Epameinondas Antonakos and Georgios Tzimiropoulos and Stefanos Zafeiriou and Maja Pantic},
title = {{300 Faces In-The-Wild Challenge}: database and results},
journal = {Image and Vision Computing},
year = {2016},
volume = {47},
pages = {3--18},
note = {Special Issue on Facial Landmark Localisation ``In-The-Wild''},
abstract = {Computer Vision has recently witnessed great research advance towards automatic facial points detection. Numerous methodologies have been proposed during the last few years that achieve accurate and efficient performance. However, fair comparison between these methodologies is infeasible mainly due to two issues. (a) Most existing databases, captured under both constrained and unconstrained (in-the-wild) conditions have been annotated using different mark-ups and, in most cases, the accuracy of the annotations is low. (b) Most published works report experimental results using different training/testing sets, different error metrics and, of course, landmark points with semantically different locations. In this paper, we aim to overcome the aforementioned problems by (a) proposing a semi-automatic annotation technique that was employed to re-annotate most existing facial databases under a unified protocol, and (b) presenting the 300 Faces In-The-Wild Challenge (300-W), the first facial landmark localization challenge that was organized twice, in 2013 and 2015. To the best of our knowledge, this is the first effort towards a unified annotation scheme of massive databases and a fair experimental comparison of existing facial landmark localization systems. The images and annotations of the new testing database that was used in the 300-W challenge are available from http://ibug.doc.ic.ac.uk/resources/300-W_IMAVIS/.},
doi = {10.1016/j.imavis.2016.01.002},
keywords = {Facial landmark localization, Challenge, Semi-automatic annotation tool, Facial database},
url = {https://ibug.doc.ic.ac.uk/resources/facial-point-annotations/},
}
@InProceedings{Schöning2016,
author = {Julius Schöning and Patrick Faion and Gunther Heidemann and Ulf Krumnack},
title = {Eye tracking data in multimedia containers for instantaneous visualizations},
booktitle = {{IEEE Workshop on Eye Tracking and Visualization}},
year = {2016},
month = oct,
publisher = {{IEEE}},
doi = {10.1109/ETVIS.2016.7851171},
}
@InCollection{Shelhamer2010,
author = {Mark Shelhamer and Dale C. Roberts},
title = {Magnetic scleral search coil},
booktitle = {{Vertigo and Imbalance: Clinical Neurophysiology of the Vestibular System}},
publisher = {Elsevier},
year = {2010},
pages = {80--87},
doi = {10.1016/s1567-4231(10)09006-4},
}
@Article{Sirohey2001,
author = {Saad A. Sirohey and Azriel Rosenfeld},
title = {Eye detection in a face image using linear and nonlinear filters},
journal = {Pattern Recognition},
year = {2001},
volume = {34},
number = {7},
pages = {1367--1391},
month = jan,
doi = {10.1016/s0031-3203(00)00082-0},
publisher = {Elsevier {BV}},
}
@InProceedings{Soltany2011,
author = {Milad Soltany and Saeid Toosi Zadeh and Hamid-Reza Pourreza},
title = {Fast and Accurate Pupil Positioning Algorithm using Circular {H}ough Transform and Gray Projection},
booktitle = {{Computer Science and Information Technology}},
year = {2011},
url = {http://www.ipcsit.com/vol5/102-ICCCM2011-C094.pdf},
}
@InProceedings{Stevens2017,
author = {Marc Stevens and Elie Bursztein and Pierre Karpman and Ange Albertini and Yarik Markov},
title = {The first collision for full {SHA-1}},
booktitle = {Advances in Cryptology – CRYPTO 2017},
year = {2017},
editor = {J. Katz and H. Shacham},
volume = {10401},
series = {Lecture Notes in Computer Science},
publisher = {Springer, Cham},
abstract = {SHA-1 is a widely used 1995 NIST cryptographic hash function standard that was officially deprecated by NIST in 2011 due to fundamental security weaknesses demonstrated in various analyses and theoretical attacks. Despite its deprecation, SHA-1 remains widely used in 2017 for document and TLS certificate signatures, and also in many software such as the GIT versioning system for integrity and backup purposes.
A key reason behind the reluctance of many industry players to replace SHA-1 with a safer alternative is the fact that finding an actual collision has seemed to be impractical for the past eleven years due to the high complexity and computational cost of the attack.
In this paper, we demonstrate that SHA-1 collision attacks have finally become practical by providing the first known instance of a collision. Furthermore, the prefix of the colliding messages was carefully chosen so that they allow an attacker to forge two PDF documents with the same SHA-1 hash yet that display arbitrarily-chosen distinct visual contents.
We were able to find this collision by combining many special cryptanalytic techniques in complex ways and improving upon previous work. In total the computational effort spent is equivalent to $2^{63.1}$ SHA-1 compressions and took approximately 6 500 CPU years and 100 GPU years. As a result while the computational power spent on this collision is larger than other public cryptanalytic computations, it is still more than 100 000 times faster than a brute force search.},
doi = {10.1007/978-3-319-63688-7_19},
keywords = {hash function, cryptanalysis, collision attack, collision example, differential path},
url = {https://shattered.it},
}
@Misc{stosurvey2017,
author = {Kevin Troy},
title = {{Stack Overflow} Developer Survey 2017},
howpublished = {https://stackoverflow.blog/2017/03/22/now-live-stack-overflow-developer-survey-2017-results/},
month = mar,
year = {2017},
note = {Accessed: 2018-01-01},
url = {https://insights.stackoverflow.com/survey/2017},
}
@InBook{Swennen2006,
chapter = {{3-D} Cephalometric Soft Tissue Landmarks},
pages = {183--226},
title = {Three-Dimensional Cephalometry},
publisher = {Springer},
year = {2006},
author = {Gwen R. J. Swennen},
editor = {Gwen R. J. Swennen and Filip A. C. Schutyser and Jarg-Erich Hausamen},
isbn = {978-3-540-29011-7},
doi = {10.1007/3-540-29011-7_4},
}
@InProceedings{Timm2011,
author = {Fabian Timm and Erhardt Barth},
title = {Accurate eye centre localisation by means of gradients},
booktitle = {{International Conference on Computer Vision Theory and Applications}},
year = {2011},
editor = {Leonid Mestetskiy and Jos\'e Braz},
volume = {1},
series = {Proceedings of the International Conference on Computer Vision Theory and Applications},
pages = {125--130},
month = mar,
organization = {INSTICC},
publisher = {SciTePress},
abstract = {The estimation of the eye centres is used in several computer vision applications such as face recognition or eye tracking. Especially for the latter, systems that are remote and rely on available light have become very popular and several methods for accurate eye centre localisation have been proposed. Nevertheless, these methods often fail to accurately estimate the eye centres in difficult scenarios, e.g. low resolution, low contrast, or occlusions. We therefore propose an approach for accurate and robust eye centre localisation by using image gradients. We derive a simple objective function, which only consists of dot products. The maximum of this function corresponds to the location where most gradient vectors intersect and thus to the eye's centre. Although simple, our method is invariant to changes in scale, pose, contrast and variations in illumination. We extensively evaluate our method on the very challenging BioID database for eye centre and iris localisation. Moreover, we compare our method with a wide range of state of the art methods and demonstrate that our method yields a significant improvement regarding both accuracy and robustness.},
doi = {10.5220/0003326101250130},
keywords = {Eye centre localisation, pupil and iris localisation, image gradients, feature extraction, shape analysis},
url = {http://www.inb.uni-luebeck.de/fileadmin/files/PUBPDFS/TiBa11b.pdf},
}
@InProceedings{Vatahska2007,
author = {Teodora Vatahska and Maren Bennewitz and Sven Behnke},
title = {{Feature-based Head Pose Estimation from Images}},
booktitle = {7th IEEE-RAS International Conference on Humanoid Robots},
year = {2007},
month = nov,
publisher = {IEEE},
abstract = {Estimating the head pose is an important capability of a robot when interacting with humans since the head pose usually indicates the focus of attention. In this paper, we present a novel approach to estimate the head pose from monocular images. Our approach proceeds in three stages. First, a face detector roughly classifies the pose as frontal, left, or right profile. Then, classifiers trained with AdaBoost using Haar-like features, detect distinctive facial features such as the nose tip and the eyes. Based on the positions of these features, a neural network finally estimates the three continuous rotation angles we use to model the head pose. Since we have a compact representation of the face using only few distinctive features, our approach is computationally highly efficient. As we show in experiments with standard databases as well as with real-time image data, our system locates the distinctive features with a high accuracy and provides robust estimates of the head pose.},
doi = {10.1109/ICHR.2007.4813889},
keywords = {Head, Face detection, Human robot interaction, Focusing, Detectors, Computer vision, Facial features, Nose, Eyes, Neural Networks},
url = {http://hrl.informatik.uni-freiburg.de/papers/vatahska07humanoids.pdf},
}
@MastersThesis{Vilks2017,
author = {Aline Vilks},
title = {{Faces in the Wild} -- Locating Faces in Mobile Eye Tracking Videos},
school = {Osnabrück University},
year = {2017},
}
@InProceedings{Viola2001,
author = {Paul Viola and Michael Jones},
title = {Rapid Object Detection using a Boosted Cascade of Simple Features},
booktitle = {{IEEE Conference on Computer Vision and Pattern Recognition}},
year = {2001},
publisher = {IEEE},
doi = {10.1109/CVPR.2001.990517},
}
@InCollection{Wedel2008,
author = {Michel Wedel and Rik Pieters},
title = {A Review of Eye-Tracking Research in Marketing},
booktitle = {{Review of Marketing Research}},
publisher = {Emerald Group Publishing Limited},
year = {2008},
pages = {123--147},
month = jan,
doi = {10.1108/s1548-6435(2008)0000004009},
}
@Article{Wikipedia:aov,
author = {{Wikipedia contributors}},
title = {Angle of view},
journal = {{Wikipedia{,} The Free Encyclopedia}},
year = {2017},
note = {\url{https://en.wikipedia.org/wiki/Angle_of_view}. Accessed: 2018-01-19},
url = {https://en.wikipedia.org/wiki/Angle_of_view},
}
@Article{Wikipedia:lineplaneintersection,
author = {{Wikipedia contributors}},
title = {Line--plane intersection},
journal = {{Wikipedia{,} The Free Encyclopedia}},
year = {2017},
note = {\url{https://en.wikipedia.org/wiki/Line-plane_intersection}. Accessed: 2018-01-15},
url = {https://en.wikipedia.org/wiki/Line%E2%80%93plane_intersection},
}
@Article{Wikipedia:lm,
author = {{Wikipedia contributors}},
title = {Levenberg--Marquardt algorithm},
journal = {{Wikipedia{,} The Free Encyclopedia}},
year = {2018},
note = {\url{https://en.wikipedia.org/wiki/Levenberg-Marquardt_algorithm}. Accessed: 2018-01-15},
url = {https://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm},
}
@InProceedings{Xiong2014,
author = {Chunshui Xiong and Lei Huang and Changping Liu},
title = {Calibration-free gaze tracking for automatic measurement of visual acuity in human infants},
booktitle = {{36th International Conference on Engineering in Medicine and Biology Society}},
year = {2014},
month = aug,
publisher = {{IEEE}},
doi = {10.1109/embc.2014.6943752},
}
@Book{Yarbus1967,
title = {Eye Movements and Vision},
publisher = {Plenum Press},
year = {1967},
author = {Alfred L. Yarbus},
editor = {Lorrin A. Riggs},
url = {http://wexler.free.fr/library/files/yarbus%20%281967%29%20eye%20movements%20and%20vision.pdf},
}
@InProceedings{Zhou2013,
author = {Erjin Zhou and Haoqiang Fan and Zhimin Cao and Yuning Jiang and Qi Yin},
title = {Extensive Facial Landmark Localization with Coarse-to-Fine Convolutional Network Cascade},
booktitle = {{IEEE International Conference on Computer Vision Workshops}},
year = {2013},
month = dec,
publisher = {{IEEE}},
doi = {10.1109/iccvw.2013.58},
}
@Comment{jabref-meta: databaseType:bibtex;}
@Comment{jabref-meta: saveOrderConfig:specified;bibtexkey;false;author;false;year;false;}