Home  | Behavioral  | Applications  | Datasets  

Click on each entry below to see additional information.
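As a convenience, the BibTeX records below can be collected into a single bibliography programmatically. A minimal sketch, assuming the third-party bibtexparser package (v1 API, pip install bibtexparser); the entry text is one example copied from this page:

```python
# Gather BibTeX records from this page into one .bib file.
# Assumes the third-party bibtexparser package (v1 API).
import bibtexparser

entry = """@article{2024_RAL_Gupta,
    author = "Gupta, Pranay and Biswas, Abhijat and Admoni, Henny and Held, David",
    journal = "IEEE Robotics and Automation Letters",
    title = "Object Importance Estimation using Counterfactual Reasoning for Intelligent Driving",
    year = "2024"
}"""

db = bibtexparser.loads(entry)       # parse the raw BibTeX string
for item in db.entries:              # each parsed entry is a plain dict
    print(item["ID"], "-", item["title"])

with open("datasets.bib", "w") as f:
    f.write(bibtexparser.dumps(db))  # re-serialize for a reference manager
```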

HOIST | paper | link
    Full name: Object Importance Estimation Using Counterfactual Reasoning
    Description: Simulated driving scenarios with object importance annotations
    Data: scene video (BEV)
    Annotations: bounding boxes, object importance labels
    @article{2024_RAL_Gupta,
        author = "Gupta, Pranay and Biswas, Abhijat and Admoni, Henny and Held, David",
        journal = "IEEE Robotics and Automation Letters",
        publisher = "IEEE",
        title = "Object Importance Estimation using Counterfactual Reasoning for Intelligent Driving",
        year = "2024"
    }
    

SCOUT | paper | link
    Full name: Task and Context-Modulated Attention
    Description: Extended annotations for four public datasets for studying drivers’ attention: DR(eye)VE, BDD-A, MAAD, LBW
    Data: eye-tracking
    Annotations: action labels, context labels, map information
    @inproceedings{2024_IV_Kotseruba_1,
        author = "Kotseruba, Iuliia and Tsotsos, John K",
        booktitle = "Intelligent Vehicles Symposium (IV)",
        title = "Data Limitations for Modeling Top-Down Effects on Drivers' Attention",
        year = "2024"
    }
    

IVGaze | paper | link
    Full name: In-Vehicle Gaze Dataset
    Description: 44K images of 25 subjects looking at different areas inside the vehicle
    Data: driver video, eye-tracking
    Annotations: gaze area labels
    @inproceedings{2024_CVPR_Cheng,
        author = "Cheng, Yihua and Zhu, Yaning and Wang, Zongji and Hao, Hongquan and Liu, Yongwei and Cheng, Shiqing and Wang, Xi and Chang, Hyung Jin",
        booktitle = "Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition",
        pages = "1556--1565",
        title = "What Do You See in Vehicle? Comprehensive Vision Solution for In-Vehicle Gaze Estimation",
        year = "2024"
    }
    

DRAMA | paper | link
    Full name: Driving Risk Assessment Mechanism with A captioning module
    Description: Driving scenarios recorded in Tokyo, Japan with video and object-level importance labels and captions
    Data: scene video
    Annotations: bounding boxes, captions
    @inproceedings{2023_WACV_Malla,
        author = "Malla, Srikanth and Choi, Chiho and Dwivedi, Isht and Choi, Joon Hee and Li, Jiachen",
        booktitle = "Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision",
        pages = "1043--1052",
        title = "DRAMA: Joint Risk Localization and Captioning in Driving",
        year = "2023"
    }
    

SAM-DD | paper | link
    Full name: Singapore AutoMan@NTU Distracted Driving Dataset
    Description: Videos of drivers performing secondary tasks
    Data: driver video, depth
    Annotations: distraction state
    @article{2023_T-ITS_Yang,
        author = "Yang, Haohan and Liu, Haochen and Hu, Zhongxu and Nguyen, Anh-Tu and Guerra, Thierry-Marie and Lv, Chen",
        journal = "IEEE Transactions on Intelligent Transportation Systems",
        publisher = "IEEE",
        title = "Quantitative Identification of Driver Distraction: A Weakly Supervised Contrastive Learning Approach",
        year = "2023"
    }
    

100-Driver | paper | link
    Description: Videos of drivers performing secondary tasks
    Data: driver video
    Annotations: action labels
    @article{2023_T-ITS_Wang,
        author = "Wang, Jing and Li, Wenjing and Li, Fang and Zhang, Jun and Wu, Zhongcheng and Zhong, Zhun and Sebe, Nicu",
        journal = "IEEE Transactions on Intelligent Transportation Systems",
        publisher = "IEEE",
        title = "100-Driver: A Large-Scale, Diverse Dataset for Distracted Driver Classification",
        year = "2023"
    }
    

DrFixD-night | paper | link
    Full name: Driver Fixation Dataset at night
    Description: 15 videos of night-time driving with eye-tracking data from 30 participants
    Data: scene video, eye-tracking
    @article{2023_T-ITS_Deng,
        author = "Deng, Tao and Jiang, Lianfang and Shi, Yi and Wu, Jiang and Wu, Zhangbi and Yan, Shun and Zhang, Xianshi and Yan, Hongmei",
        journal = "IEEE Transactions on Intelligent Transportation Systems",
        publisher = "IEEE",
        title = "Driving Visual Saliency Prediction of Dynamic Night Scenes via a Spatio-Temporal Dual-Encoder Network",
        year = "2023"
    }
    

AIDE | paper | link
    Full name: Assistive Driving Perception Dataset
    Description: Naturalistic dataset with multi-camera views of drivers performing normal driving and secondary tasks
    Data: driver video, scene video
    Annotations: distraction state, action labels
    @inproceedings{2023_ICCV_Yang,
        author = "Yang, Dingkang and Huang, Shuai and Xu, Zhi and Li, Zhenpeng and Wang, Shunli and Li, Mingcheng and Wang, Yuzheng and Liu, Yang and Yang, Kun and Chen, Zhaoyu and others",
        booktitle = "Proceedings of the IEEE/CVF International Conference on Computer Vision",
        pages = "20459--20470",
        title = "AIDE: A Vision-Driven Multi-View, Multi-Modal, Multi-Tasking Dataset for Assistive Driving Perception",
        year = "2023"
    }
    

SynDD1 | paper | link
    Full name: Synthetic Distracted Driving Dataset
    Description: Synthetic dataset for training machine learning models to detect and analyze drivers' distracted behaviors and gaze zones.
    Data: driver video
    Annotations: gaze area labels, action labels, appearance labels
    @article{2023_DiB_Rahman,
        author = "Rahman, Mohammed Shaiqur and Venkatachalapathy, Archana and Sharma, Anuj and Wang, Jiyang and Gursoy, Senem Velipasalar and Anastasiu, David and Wang, Shuo",
        journal = "Data in brief",
        pages = "108793",
        publisher = "Elsevier",
        title = "Synthetic distracted driving (syndd1) dataset for analyzing distracted behaviors and various gaze zones of a driver",
        volume = "46",
        year = "2023"
    }
    

AI CITY NDAR | paper | link
    Full name: AI CITY Naturalistic Driving Action Recognition
    Description: 594 video clips (90 hours) of 99 drivers performing 16 secondary tasks during driving
    Data: driver video
    @inproceedings{2023_CVPRW_Naphade,
        author = "Naphade, Milind and Wang, Shuo and Anastasiu, David C and Tang, Zheng and Chang, Ming-Ching and Yao, Yue and Zheng, Liang and Rahman, Mohammed Shaiqur and Arya, Meenakshi S and Sharma, Anuj and others",
        booktitle = "Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition",
        pages = "5538--5548",
        title = "The 7th ai city challenge",
        year = "2023"
    }
    

FatigueView | paper | link
    Description: Multi-camera video dataset for vision-based drowsiness detection.
    Data: driver video
    Annotations: facial landmarks, face/hand bounding boxes, head pose, eye status, pose, drowsiness labels
    @article{2022_T-ITS_Yang,
        author = "Yang, Cong and Yang, Zhenyu and Li, Weiyu and See, John",
        journal = "IEEE Transactions on Intelligent Transportation Systems",
        publisher = "IEEE",
        title = "FatigueView: A Multi-Camera Video Dataset for Vision-Based Drowsiness Detection",
        year = "2022"
    }
    

CoCAtt | paper | link
    Full name: A Cognitive-Conditioned Driver Attention Dataset
    Description: Videos of drivers and driving scenes in automated and manual driving conditions with per-frame gaze and distraction annotations
    Data: driver video, scene video, eye-tracking
    Annotations: distraction state, car telemetry, intention labels
    @inproceedings{2022_ITSC_Shen,
        author = "Shen, Yuan and Wijayaratne, Niviru and Sriram, Pranav and Hasan, Aamir and Du, Peter and Driggs-Campbell, Katherine",
        booktitle = "2022 IEEE 25th International Conference on Intelligent Transportation Systems (ITSC)",
        organization = "IEEE",
        pages = "32--39",
        title = "CoCAtt: A Cognitive-Conditioned Driver Attention Dataset",
        year = "2022"
    }
    

LBW | paper | link
    Full name: Look Both Ways
    Description: Synchronized videos from scene and driver-facing cameras of drivers performing various maneuvers in traffic
    Data: driver video, scene video, eye-tracking
    @inproceedings{2022_ECCV_Kasahara,
        author = "Kasahara, Isaac and Stent, Simon and Park, Hyun Soo",
        booktitle = "Computer Vision--ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23--27, 2022, Proceedings, Part XIII",
        organization = "Springer",
        pages = "126--142",
        title = "Look Both Ways: Self-supervising Driver Gaze Estimation and Road Scene Saliency",
        year = "2022"
    }
    

DAD | paper | link
    Full name: Driver Anomaly Detection
    Description: Videos of normal and anomalous behaviors (manual/visual distractions) of drivers.
    Data: driver video
    Annotations: action labels
    @inproceedings{2021_WACV_Kopuklu,
        author = "Kopuklu, Okan and Zheng, Jiapeng and Xu, Hang and Rigoll, Gerhard",
        booktitle = "Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision",
        pages = "91--100",
        title = "Driver anomaly detection: A dataset and contrastive learning approach",
        year = "2021"
    }
    

MAAD | paper | link
    Full name: Attended Awareness in Driving
    Description: A subset of videos from DR(eye)VE annotated with gaze collected in lab conditions.
    Data: eye-tracking, scene video
    Annotations: task labels
    @inproceedings{2021_ICCVW_Gopinath,
        author = "Gopinath, Deepak and Rosman, Guy and Stent, Simon and Terahata, Katsuya and Fletcher, Luke and Argall, Brenna and Leonard, John",
        booktitle = "Proceedings of the IEEE/CVF International Conference on Computer Vision",
        pages = "3426--3436",
        title = {MAAD: A Model and Dataset for "Attended Awareness" in Driving},
        year = "2021"
    }
    

DGW | paper | link
    Full name: Driver Gaze in the Wild
    Description: Videos of drivers fixating on different areas in the vehicle without constraining their head and eye movements
    Data: driver video
    Annotations: gaze area labels
    @inproceedings{2021_ICCVW_Ghosh,
        author = "Ghosh, Shreya and Dhall, Abhinav and Sharma, Garima and Gupta, Sarthak and Sebe, Nicu",
        booktitle = "ICCVW",
        title = "Speak2label: Using domain knowledge for creating a large scale driver gaze zone estimation dataset",
        year = "2021"
    }
    

55 Rides | paper | link
    Description: Naturalistic dataset recorded by four drivers and annotated by three raters to determine distraction states
    Data: driver video, eye-tracking
    Annotations: distraction state, head pose
    @inproceedings{2021_ETRA_Kubler,
        author = {K{\"u}bler, Thomas C and Fuhl, Wolfgang and Wagner, Elena and Kasneci, Enkelejda},
        booktitle = "ACM Symposium on Eye Tracking Research and Applications",
        pages = "1--8",
        title = "55 Rides: attention annotated head and gaze data during naturalistic driving",
        year = "2021"
    }
    

TrafficSaliency | paper | link
    Description: 16 videos of driving scenes with gaze data of 28 subjects recorded in the lab with an eye-tracker
    Data: eye-tracking, scene video
    @article{2020_T-ITS_Deng,
        author = "Deng, Tao and Yan, Hongmei and Qin, Long and Ngo, Thuyen and Manjunath, BS",
        journal = "IEEE Transactions on Intelligent Transportation Systems",
        number = "5",
        pages = "2146--2154",
        publisher = "IEEE",
        title = "{How do drivers allocate their potential attention? Driving fixation prediction via convolutional neural networks}",
        volume = "21",
        year = "2019"
    }
    

NeuroIV | paper | link
    Full name: Neuromorphic Vision Meets Intelligent Vehicle
    Description: Videos of drivers performing secondary tasks, making hand gestures, and observing different regions inside the vehicle, recorded with a DAVIS event camera and a depth sensor
    Data: driver video
    @article{2020_T-ITS_Chen,
        author = {Chen, Guang and Wang, Fa and Li, Weijun and Hong, Lin and Conradt, J{\"o}rg and Chen, Jieneng and Zhang, Zhenyan and Lu, Yiwen and Knoll, Alois},
        journal = "IEEE Transactions on Intelligent Transportation Systems",
        number = "2",
        pages = "1171--1183",
        publisher = "IEEE",
        title = "NeuroIV: Neuromorphic vision meets intelligent vehicle towards safe driving with a new database and baseline evaluations",
        volume = "23",
        year = "2020"
    }
    

LISA v2 | paper | link
    Full name: Laboratory for Intelligent and Safe Automobiles
    Description: Videos of drivers with and without eyeglasses recorded under different lighting conditions
    Data: driver video
    @inproceedings{2020_IV_Rangesh,
        author = "Rangesh, Akshay and Zhang, Bowen and Trivedi, Mohan M",
        booktitle = "IV",
        title = "Driver gaze estimation in the real world: Overcoming the eyeglass challenge",
        year = "2020"
    }
    

DGAZE | paper | link
    Description: A dataset mapping drivers’ gaze to different areas in a static traffic scene in lab conditions
    Data: driver video, scene video
    Annotations: bounding boxes
    @inproceedings{2020_IROS_Dua,
        author = "Dua, Isha and John, Thrupthi Ann and Gupta, Riya and Jawahar, CV",
        booktitle = "IROS",
        title = "DGAZE: Driver Gaze Mapping on Road",
        year = "2020"
    }
    

DMD | paper | link
    Full name: Driving Monitoring Dataset
    Description: A diverse multi-modal dataset of drivers performing various secondary tasks, observing different regions inside the car, and showing signs of drowsiness, recorded on-road and in a simulation environment
    Data: driver video, scene video, vehicle data
    Annotations: bounding boxes, action labels
    @inproceedings{2020_ECCVW_Ortega,
        author = "Ortega, Juan Diego and Kose, Neslihan and Ca{\\textasciitilde n}as, Paola and Chao, Min-An and Unnervik, Alexander and Nieto, Marcos and Otaegui, Oihana and Salgado, Luis",
        booktitle = "ECCV",
        title = "Dmd: A large-scale multi-modal driver monitoring dataset for attention and alertness analysis",
        year = "2020"
    }
    

EBDD | paper | link
    Full name: EEE BUET Distracted Driving Dataset
    Description: Videos of drivers performing secondary tasks
    Data: driver video
    Annotations: action labels, bounding boxes
    @article{2019_TCSVT_Billah,
        author = "Billah, Tashrif and Rahman, SM Mahbubur and Ahmad, M Omair and Swamy, MNS",
        journal = "IEEE Transactions on Circuits and Systems for Video Technology",
        number = "4",
        pages = "1048--1062",
        publisher = "IEEE",
        title = "Recognizing distractions for assistive driving by tracking body parts",
        volume = "29",
        year = "2018"
    }
    

PRORETA 4 | paper | link
    Description: Videos of traffic scenes recorded in an instrumented vehicle together with the driver's gaze data, for evaluating how accurately the driver's current object of fixation can be detected
    Data: eye-tracking, driver video, scene video
    @inproceedings{2019_IV_Schwehr,
        author = "Schwehr, Julian and Knaust, Moritz and Willert, Volker",
        booktitle = "IV",
        title = "How to evaluate object-of-fixation detection",
        year = "2019"
    }
    

DADA-2000 | paper | link
    Full name: Driver Attention in Driving Accident Scenarios
    Description: 2000 accident videos collected from video hosting websites, with eye-tracking data from 20 subjects collected in the lab.
    Data: eye-tracking, scene video
    Annotations: bounding boxes, accident category labels
    @inproceedings{2019_ITSC_Fang,
        author = "Fang, Jianwu and Yan, Dingxin and Qiao, Jiahuan and Xue, Jianru and Wang, He and Li, Sen",
        booktitle = "ITSC",
        title = "{DADA-2000: Can Driving Accident be Predicted by Driver Attentionƒ Analyzed by A Benchmark}",
        year = "2019"
    }
    

H3D | paper | link
    Full name: Honda 3D Dataset
    Description: A subset of videos from HDD dataset with 3D bounding boxes and object ids for tracking
    Data: scene video, vehicle data
    Annotations: bounding boxes
    @inproceedings{2019_ICRA_Patil,
        author = "Patil, Abhishek and Malla, Srikanth and Gang, Haiming and Chen, Yi-Ting",
        booktitle = "2019 International Conference on Robotics and Automation (ICRA)",
        organization = "IEEE",
        pages = "9552--9557",
        title = "The h3d dataset for full-surround 3d multi-object detection and tracking in crowded urban scenes",
        year = "2019"
    }
    

Drive&Act | paper | link
    Description: Videos of drivers performing various driving- and non-driving-related tasks
    Data: driver video
    Annotations: semantic maps, action labels
    @inproceedings{2019_ICCV_Martin,
        author = "Martin, Manuel and Roitberg, Alina and Haurilet, Monica and Horne, Matthias and Rei{\ss}, Simon and Voit, Michael and Stiefelhagen, Rainer",
        booktitle = "ICCV",
        title = "Drive\\&act: A multi-modal dataset for fine-grained driver behavior recognition in autonomous vehicles",
        year = "2019"
    }
    

RLDD | paper | link
    Full name: Real-Life Drowsiness Dataset
    Description: Crowdsourced videos of people in various states of drowsiness recorded in indoor environments
    Data: driver video
    Annotations: drowsiness labels
    @inproceedings{2019_CVPRW_Ghoddoosian,
        author = "Ghoddoosian, Reza and Galib, Marnim and Athitsos, Vassilis",
        booktitle = "CVPRW",
        title = "A realistic dataset and baseline temporal model for early drowsiness detection",
        year = "2019"
    }
    

HAD | paper | link
    Full name: HRI Advice Dataset
    Description: A subset of videos from HDD naturalistic dataset annotated with textual advice containing 1) goals – where the vehicle should move and 2) attention – where the vehicle should look
    Data: scene video, vehicle data
    Annotations: goal and attention labels
    @inproceedings{2019_CVPR_Kim,
        author = "Kim, Jinkyu and Misu, Teruhisa and Chen, Yi-Ting and Tawari, Ashish and Canny, John",
        booktitle = "CVPR",
        title = "Grounding human-to-vehicle advice for self-driving vehicles",
        year = "2019"
    }
    

3MDAD | paper | link
    Full name: Multimodal Multiview and Multispectral Driver Action Dataset
    Description: Videos of drivers performing secondary tasks
    Data: driver video
    Annotations: action labels, bounding boxes
    @inproceedings{2019_CAIP_Jegham,
        author = "Jegham, Imen and Ben Khalifa, Anouar and Alouani, Ihsen and Mahjoub, Mohamed Ali",
        booktitle = "Computer Analysis of Images and Patterns: 18th International Conference, CAIP 2019, Salerno, Italy, September 3--5, 2019, Proceedings, Part I 18",
        organization = "Springer",
        pages = "518--529",
        title = "Mdad: A multimodal and multiview in-vehicle driver action dataset",
        year = "2019"
    }
    

DR(eye)VE | paper | link
    Description: Driving videos recorded on-road with corresponding gaze data of the driver
    Data: eye-tracking, scene video, vehicle data
    Annotations: weather and road type labels
    @article{2018_PAMI_Palazzi,
        author = "Palazzi, Andrea and Abati, Davide and Solera, Francesco and Cucchiara, Rita and others",
        journal = "IEEE TPAMI",
        number = "7",
        pages = "1720--1733",
        title = "{Predicting the Driver's Focus of Attention: the DR (eye) VE Project}",
        volume = "41",
        year = "2018"
    }
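
The gaze data in DR(eye)VE-style datasets is registered to the scene video frame by frame. A hedged visualization sketch using OpenCV; the file names and the frame,x,y CSV layout are hypothetical placeholders, not the dataset's documented format:

```python
# Draw the recorded fixation point on each scene frame (illustrative only;
# "run01_scene.avi" and "run01_gaze.csv" are hypothetical file names).
import csv
import cv2  # pip install opencv-python

cap = cv2.VideoCapture("run01_scene.avi")
with open("run01_gaze.csv") as f:  # assumed columns: frame, x, y
    gaze = {int(r["frame"]): (int(float(r["x"])), int(float(r["y"])))
            for r in csv.DictReader(f)}

idx = 0
while True:
    ok, frame = cap.read()
    if not ok:
        break
    if idx in gaze:
        cv2.circle(frame, gaze[idx], 12, (0, 0, 255), 2)  # mark fixation
    cv2.imshow("gaze", frame)
    if cv2.waitKey(30) & 0xFF == 27:  # Esc quits
        break
    idx += 1
cap.release()
cv2.destroyAllWindows()
```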
    

BDD-X | paper | link
    Full name: Berkeley Deep Drive-X (eXplanation) Dataset
    Description: A subset of videos from BDD dataset annotated with textual descriptions of actions performed by the vehicle and explanations justifying those actions
    Data: scene video, vehicle data
    Annotations: action explanations
    @inproceedings{2018_ECCV_Kim,
        author = "Kim, Jinkyu and Rohrbach, Anna and Darrell, Trevor and Canny, John and Akata, Zeynep",
        booktitle = "ECCV",
        title = "Textual explanations for self-driving vehicles",
        year = "2018"
    }
    

HDD | paper | link
    Full name: HRI Driving Dataset
    Description: A large naturalistic driving dataset with driving footage, vehicle telemetry and annotations for vehicle actions and their justifications
    Data: scene video, vehicle data
    Annotations: bounding boxes, action labels
    @inproceedings{2018_CVPR_Ramanishka,
        author = "Ramanishka, Vasili and Chen, Yi-Ting and Misu, Teruhisa and Saenko, Kate",
        booktitle = "CVPR",
        title = "Toward driving scene understanding: A dataset for learning driver behavior and causal reasoning",
        year = "2018"
    }
    

BDD-A | paper | link
    Full name: Berkeley Deep Drive-A (Attention) Dataset
    Description: A set of short video clips extracted from the Berkeley Deep Drive (BDD) dataset with additional eye-tracking data collected in the lab from 45 subjects
    Data: eye-tracking, scene video, vehicle data
    @inproceedings{2018_ACCV_Xia,
        author = "Xia, Ye and Zhang, Danqing and Kim, Jinkyu and Nakayama, Ken and Zipser, Karl and Whitney, David",
        booktitle = "ACCV",
        title = "Predicting driver attention in critical situations",
        year = "2018"
    }
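
Eye-tracking datasets such as BDD-A typically serve as ground truth for attention-prediction benchmarks, scored with distribution-comparison metrics. A generic sketch of the widely used correlation coefficient (CC) metric; this is not code shipped with the dataset:

```python
# Pearson correlation (CC) between a predicted and a ground-truth saliency map.
import numpy as np

def cc(pred: np.ndarray, gt: np.ndarray) -> float:
    """Both maps must have the same shape; higher is better."""
    p = (pred - pred.mean()) / (pred.std() + 1e-8)  # standardize prediction
    g = (gt - gt.mean()) / (gt.std() + 1e-8)        # standardize ground truth
    return float((p * g).mean())

# Toy usage with random maps; real maps come from a model and the dataset.
rng = np.random.default_rng(0)
print(cc(rng.random((36, 64)), rng.random((36, 64))))
```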
    

AUCD2 | paper | link
    Full name: American University in Cairo (AUC) Distracted Driver’s Dataset
    Description: Videos of drivers performing secondary tasks
    Data: driver video
    Annotations: action labels
    @inproceedings{2017_NeurIPS_Abouelnaga,
        author = "Abouelnaga, Yehya and Eraqi, Hesham M. and Moustafa, Mohamed N.",
        booktitle = "NeurIPS Workshop on Machine Learning for Intelligent Transportation Systems",
        title = "eal-time Distracted Driver Posture Classification",
        year = "2017"
    }
    

C42CN | paper | link
    Description: A multi-modal dataset acquired in a controlled experiment on a driving simulator under 4 conditions: no distraction, cognitive, emotional and sensorimotor distraction.
    Data: eye-tracking, scene video, physiological signal
    @article{2017_NatSciData_Taamneh,
        author = "Taamneh, Salah and Tsiamyrtzis, Panagiotis and Dcosta, Malcolm and Buddharaju, Pradeep and Khatri, Ashik and Manser, Michael and Ferris, Thomas and Wunderlich, Robert and Pavlidis, Ioannis",
        journal = "Scientific Data",
        pages = "170110",
        title = "A multimodal dataset for various forms of distracted driving",
        volume = "4",
        year = "2017"
    }
    

DriveAHead | paper | link
    Description: Videos of drivers with frame-level head pose annotations obtained from a motion-capture system
    Data: driver video
    Annotations: occlusion, head pose, depth
    @inproceedings{2017_CVPRW_Schwarz,
        author = "Schwarz, Anke and Haurilet, Monica and Martinez, Manuel and Stiefelhagen, Rainer",
        booktitle = "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops",
        pages = "1--10",
        title = "Driveahead-a large-scale driver head pose dataset",
        year = "2017"
    }
    

DDD | paper | link
    Full name: Driver Drowsiness Detection Dataset
    Description: Videos of human subjects simulating different levels of drowsiness while driving in a simulator
    Data: driver video
    Annotations: drowsiness labels
    @inproceedings{2017_ACCV_Weng,
        author = "Weng, Ching-Hua and Lai, Ying-Hsiu and Lai, Shang-Hong",
        booktitle = "ACCV",
        title = "Driver drowsiness detection via a hierarchical temporal deep belief network",
        year = "2016"
    }
    

DROZY | paper | link
    Description: Videos and physiological data from subjects in different drowsiness states after prolonged waking
    Data: driver video, physiological signal
    Annotations: drowsiness labels
    @inproceedings{2016_WACV_Massoz,
        author = "Massoz, Quentin and Langohr, Thomas and Fran{\c{c}}ois, Cl{\'e}mentine and Verly, Jacques G",
        booktitle = "WACV",
        title = "The ULg multimodality drowsiness database (called DROZY) and examples of use",
        year = "2016"
    }
    

TETD | paper | link
    Full name: Traffic Eye Tracking Dataset
    Description: A set of 100 images of traffic scenes with corresponding eye-tracking data from 20 subjects
    Data: eye-tracking, scene images
    @article{2016_T-ITS_Deng,
        author = "Deng, Tao and Yang, Kaifu and Li, Yongjie and Yan, Hongmei",
        journal = "IEEE Transactions on Intelligent Transportation Systems",
        number = "7",
        pages = "2051--2062",
        publisher = "IEEE",
        title = "Where does the driver look? Top-down-based saliency detection in a traffic driving environment",
        volume = "17",
        year = "2016"
    }
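
Raw fixation coordinates, as provided by TETD and similar eye-tracking datasets, are commonly converted into a continuous saliency map by accumulating fixations and applying Gaussian smoothing. A minimal sketch; the input format and the sigma value are assumptions, not the dataset's specification:

```python
# Build a normalized fixation heat map from (x, y) gaze points.
import numpy as np
from scipy.ndimage import gaussian_filter

def fixation_map(points, height, width, sigma=25.0):
    """points: iterable of (x, y) pixel coordinates pooled over subjects."""
    m = np.zeros((height, width), dtype=np.float64)
    for x, y in points:
        if 0 <= int(y) < height and 0 <= int(x) < width:
            m[int(y), int(x)] += 1.0          # accumulate fixation counts
    m = gaussian_filter(m, sigma=sigma)       # spatial smoothing
    return m / m.max() if m.max() > 0 else m  # normalize to [0, 1]

heat = fixation_map([(320, 240), (350, 260), (100, 400)], height=480, width=640)
```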
    

DAD | paper | link
    Description: Videos of accidents recorded with dashboard cameras sourced from video hosting sites with annotations for accidents and road users involved in them
    Data: scene video
    Annotations: bounding boxes, accident category labels
    @inproceedings{2016_ACCV_Chan,
        author = "Chan, Fu-Hsiang and Chen, Yu-Ting and Xiang, Yu and Sun, Min",
        booktitle = "ACCV",
        title = "Anticipating accidents in dashcam videos",
        year = "2016"
    }
    

DIPLECS Surrey | paper | link
    Description: Driving videos with steering information recorded in different cars and environments
    Data: scene video, vehicle data
    @article{2015_TranVehTech_Pugeault,
        author = "Pugeault, Nicolas and Bowden, Richard",
        journal = "IEEE Transactions on Vehicular Technology",
        number = "12",
        pages = "5424--5438",
        publisher = "IEEE",
        title = "How much of driving is preattentive?",
        volume = "64",
        year = "2015"
    }
    

Brain4Cars | paper | link
    Description: Synchronized videos from scene and driver-facing cameras of drivers performing various maneuvers in traffic
    Data: driver video, scene video, vehicle data
    Annotations: action labels
    @inproceedings{2015_ICCV_Jain,
        author = "Jain, Ashesh and Koppula, Hema S and Raghavan, Bharad and Soh, Shane and Saxena, Ashutosh",
        booktitle = "ICCV",
        title = "Car that knows before you do: Anticipating maneuvers via learning temporal driving models",
        year = "2015"
    }
    

YawDD | paper | link
    Full name: Yawning Detection Dataset
    Description: Recordings of human subjects in parked vehicles simulating normal driving, singing and talking, and yawning
    Data: driver video
    Annotations: bounding boxes, action labels
    @inproceedings{2014_ACM_Abtahi,
        author = "Abtahi, Shabnam and Omidyeganeh, Mona and Shirmohammadi, Shervin and Hariri, Behnoosh",
        booktitle = "Proceedings of the ACM Multimedia Systems Conference",
        title = "{YawDD: A yawning detection dataset}",
        year = "2014"
    }
    

3DDS | paper | link
    Full name: 3D Driving School Dataset
    Description: Videos and eye-tracking data of people playing a 3D driving simulator game
    Data: eye-tracking, scene video
    @inproceedings{2011_BMVC_Borji,
        author = "Borji, Ali and Sihite, Dicky N and Itti, Laurent",
        booktitle = "BMVC",
        title = "Computational Modeling of Top-down Visual Attention in Interactive Environments.",
        year = "2011"
    }
    

DIPLECS Sweden | paper | link
    Description: Driving videos with steering information recorded in different cars and environments
    Data: scene video, vehicle data
    @inproceedings{2010_ECCV_Pugeault,
        author = "Pugeault, Nicolas and Bowden, Richard",
        booktitle = "ECCV",
        title = "Learning pre-attentive driving behaviour from holistic visual features",
        year = "2010"
    }
    

BU HeadTracking | paper | link
    Full name: Boston University Head Tracking Dataset
    Description: Videos and head tracking information for multiple human subjects recorded in diverse conditions
    Data: driver video
    Annotations: head pose
    @article{2000_PAMI_LaCascia,
        author = "La Cascia, Marco and Sclaroff, Stan and Athitsos, Vassilis",
        journal = "IEEE Transactions on pattern analysis and machine intelligence",
        number = "4",
        pages = "322--336",
        publisher = "IEEE",
        title = "Fast, reliable head tracking under varying illumination: An approach based on registration of texture-mapped 3D models",
        volume = "22",
        year = "2000"
    }
    

Dashcam dataset | link
    Description: Driving videos with steering information recorded on road
    Data: scene video
    
    

SFDDD | link
    Full name: State Farm Distracted Driver Detection
    Description: Videos of drivers performing secondary tasks
    Data: driver video
    Annotations: action labels