-
Notifications
You must be signed in to change notification settings - Fork 0
/
p__Home__index.async.js
1 lines (1 loc) · 71.1 KB
/
p__Home__index.async.js
1
"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[371],{91773:function(Ue,L,c){c.r(L),c.d(L,{default:function(){return ze}});var K=c(56690),x=c.n(K),Y=c(89728),S=c.n(Y),Q=c(61655),j=c.n(Q),$=c(26389),k=c.n($),R=c(62435),V=c(80840),q=c(42122),n=c.n(q),ee=c(70215),P=c.n(ee),ae=c(66115),O=c.n(ae),ne=c(38416),I=c.n(ne),M=c(84289),te=c(43033),e=c(86074),re=["dataSource","isMobile"],oe=function(C){j()(r,C);var v=k()(r);function r(y){var o;return x()(this,r),o=v.call(this,y),I()(O()(o),"phoneClick",function(){var u=!o.state.phoneOpen;o.setState({phoneOpen:u})}),o.state={phoneOpen:!1},o}return S()(r,[{key:"render",value:function(){var o=this,u=this.props,g=u.dataSource,h=u.isMobile,f=P()(u,re),p=this.state.phoneOpen,i=g.LinkMenu,l=i.children,m=Object.keys(l).map(function(t,a){var b=l[t],D=te.rU,d={};return b.to&&b.to.match(/\//g)&&(d.href=b.to,d.target="_blank",D="a",delete b.to),R.createElement(D,n()(n()(n()({},b),d),{},{key:a.toString()}),l[t].children)}),s=p===void 0?300:null;return(0,e.jsx)(M.ZP,n()(n()(n()({component:"header",animation:{opacity:0,type:"from"}},g.wrapper),f),{},{children:(0,e.jsxs)("div",n()(n()({},g.page),{},{className:"".concat(g.page.className).concat(p?" open":""),children:[(0,e.jsx)(M.ZP,n()(n()({animation:{x:-30,type:"from",ease:"easeOutQuad"}},g.logo),{},{children:(0,e.jsx)("img",{width:"100%",src:g.logo.children,alt:"img"})})),h&&(0,e.jsxs)("div",n()(n()({},g.mobileMenu),{},{onClick:function(){o.phoneClick()},children:[(0,e.jsx)("em",{}),(0,e.jsx)("em",{}),(0,e.jsx)("em",{})]})),(0,e.jsx)(M.ZP,n()(n()({},i),{},{animation:h?{height:0,duration:300,onComplete:function(a){o.state.phoneOpen&&(a.target.style.height="auto")},ease:"easeInOutQuad"}:null,moment:s,reverse:!!p,children:m}))]}))}))}}]),r}(R.Component),ie=oe,se=c(13012),A=c.n(se),W=c(72806),z=c(1289),ce=c(72575),Je=c(83154),le=["name","texty"],de=function(C){j()(r,C);var v=k()(r);function r(){return x()(this,r),v.apply(this,arguments)}return S()(r,[{key:"render",value:function(){var o=Object.assign({},(A()(this.props),this.props)),u=o.dataSource;delete o.dataSource,delete o.isMobile;var g=u.textWrapper.children.map(function(h){var f=h.name,p=h.texty,i=P()(h,le);return f.match("button")?(0,e.jsx)(W.Z,n()(n()({type:"primary"},i),{},{children:h.children}),f):(0,e.jsx)("div",n()(n()({},i),{},{children:p?(0,e.jsx)(ce.Z,{type:"mask-bottom",children:h.children}):h.children}),f)});return(0,e.jsx)("div",n()(n()(n()({},o),u.wrapper),{},{children:(0,e.jsx)(z.Z,n()(n()({type:["bottom","top"],delay:200},u.textWrapper),{},{children:g}),"QueueAnim")}))}}]),r}(R.PureComponent),ue=de,me=c(3600),B=c.n(me),w=c(30510),pe=c(18698),he=c.n(pe),G=/^http(s)?:\/\/([\w-]+\.)+[\w-]+(\/[\w-./?%&=]*)?/,U=function(v,r){var y=v.name.indexOf("title")===0?"h1":"div";y=v.href?"a":y;var o=typeof v.children=="string"&&v.children.match(G)?R.createElement("img",{src:v.children,alt:"img"}):v.children;return v.name.indexOf("button")===0&&he()(v.children)==="object"&&(o=R.createElement(W.Z,n()({},v.children))),R.createElement(y,n()({key:r.toString()},v),o)},ve=["childWrapper"],ge=["columns","dataSource"],fe=["dataSource","isMobile"],be=["columns","dataSource"],Ze=function(C){j()(r,C);var v=k()(r);function r(){var y;x()(this,r);for(var o=arguments.length,u=new Array(o),g=0;g<o;g++)u[g]=arguments[g];return y=v.call.apply(v,[this].concat(u)),I()(O()(y),"getColumns",function(h){return h.map(function(f){var p=f.childWrapper,i=P()(f,ve);return n()(n()({align:"center"},i),{},{title:(0,e.jsx)("div",n()(n()({},p),{},{children:p.children.map(U)}))})})}),I()(O()(y),"getDataSource",function(h,f){return h.map(function(p,i){var l={key:i.toString()};return p.children.forEach(function(m,s){f[s]&&(l[f[s].key]=(0,e.jsx)("div",n()(n()({},m),{},{children:typeof m.children=="string"&&m.children.match(G)?(0,e.jsx)("img",{src:m.children,alt:"img"}):m.children})))}),l})}),I()(O()(y),"getMobileChild",function(h){var f=h.columns,p=h.dataSource,i=P()(h,ge),l=f.children.filter(function(s){return s.key.indexOf("name")>=0}),m=f.children.filter(function(s){return s.key.indexOf("name")===-1});return m.map(function(s,t){var a=[].concat(l[0],s).filter(function(d){return d});a.length>1&&(a[0].colSpan=0,a[1].colSpan=2);var b=p.children.map(function(d){var Be=d.children.filter(function(F){return F.name.indexOf("name")===-1}),Ge=d.children.filter(function(F){return F.name.indexOf("name")>=0});return n()(n()({},d),{},{children:[].concat(Ge[0],Be[t]).filter(function(F){return F})})}),D=n()(n()({},i),{},{columns:y.getColumns(a),dataSource:y.getDataSource(b,a)});return(0,e.jsx)(w.Z,n()(n()({},D),{},{pagination:!1,bordered:!0}),t.toString())})}),y}return S()(r,[{key:"render",value:function(){var o=this.props,u=o.dataSource,g=o.isMobile,h=P()(o,fe),f=u.Table,p=u.wrapper,i=u.page,l=u.titleWrapper,m=f.columns,s=f.dataSource,t=P()(f,be),a=n()(n()({},t),{},{columns:this.getColumns(m.children),dataSource:this.getDataSource(s.children,m.children)}),b=g?this.getMobileChild(f):(0,e.jsx)(w.Z,n()(n()({},a),{},{pagination:!1,bordered:!0}),"table");return(0,e.jsx)("div",n()(n()(n()({},h),p),{},{children:(0,e.jsx)("div",n()({},i))}))}}]),r}(R.PureComponent),Xe=null,He=c(61254),T=c(49843),E=c(25968),N=c(6226),ye=c(45098),Ce=function(C){j()(r,C);var v=k()(r);function r(){return x()(this,r),v.apply(this,arguments)}return S()(r,[{key:"render",value:function(){var o=Object.assign({},(A()(this.props),this.props)),u=o.dataSource;delete o.dataSource,delete o.isMobile;var g=[{name:"Color, Texture, Shape",star:1},{name:"Range Measurement",star:5},{name:"Velocity Measurement",star:5},{name:"Lighting Robustness",star:5},{name:"Weather Robustness",star:5},{name:"Classification Ability",star:2},{name:"3D Perception",star:1},{name:"Cost Advantage",star:4}],h={data:g.map(function(s){return n()(n()({},s),{},{star:s.star})}),xField:"name",yField:"star",appendPadding:[0,20,0,20],color:"#B2934A",legend:!0,meta:{star:{alias:"Radar Ability",min:0,nice:!0,formatter:function(t){return t}}},xAxis:{tickLine:null},yAxis:{label:!1,grid:{alternateColor:"rgba(0, 0, 0, 0.04)"}},point:{size:2},area:{}},f=[{name:"Color, Texture, Shape",star:5},{name:"Range Measurement",star:2},{name:"Velocity Measurement",star:2},{name:"Lighting Robustness",star:3},{name:"Weather Robustness",star:3},{name:"Classification Ability",star:5},{name:"3D Perception",star:3},{name:"Cost Advantage",star:5}],p={data:f.map(function(s){return n()(n()({},s),{},{star:s.star})}),xField:"name",yField:"star",appendPadding:[0,20,0,20],color:"#B66A6A",meta:{star:{alias:"Camera Ability",min:0,nice:!0,formatter:function(t){return t}}},xAxis:{tickLine:null},yAxis:{label:!1,grid:{alternateColor:"rgba(0, 0, 0, 0.04)"}},point:{size:2},area:{}},i=[{name:"Color, Texture, Shape",star:5},{name:"Range Measurement",star:5},{name:"Velocity Measurement",star:5},{name:"Lighting Robustness",star:5},{name:"Weather Robustness",star:5},{name:"Classification Ability",star:5},{name:"3D Perception",star:3},{name:"Cost Advantage",star:4}],l={data:i.map(function(s){return n()(n()({},s),{},{star:s.star})}),xField:"name",yField:"star",color:"#589D9D",meta:{star:{alias:"Fusion Ability",min:0,nice:!0,formatter:function(t){return t}}},xAxis:{tickLine:null},yAxis:{label:!1,grid:{alternateColor:"rgba(0, 0, 0, 0.04)"}},point:{size:2},area:{}},m=function(t,a,b,D){console.log("params",t,a,b,D)};return(0,e.jsx)("div",n()(n()(n()({},o),u.wrapper),{},{id:"characteristics",children:(0,e.jsx)("div",{className:"title-wrapper",children:(0,e.jsxs)("div",{className:"chart",children:[(0,e.jsx)("h2",{name:"title",className:"title-h5",children:"Radar-Camera Characteristics"}),(0,e.jsxs)(E.Z,{justify:"start",align:"middle",children:[(0,e.jsx)(N.Z,{span:8,offset:4,children:(0,e.jsxs)(E.Z,{children:[(0,e.jsx)(N.Z,{span:24,children:(0,e.jsx)(T.Z,n()({},h))}),(0,e.jsx)(N.Z,{span:24,children:(0,e.jsx)(T.Z,n()({},p))})]})}),(0,e.jsx)(N.Z,{span:12,children:(0,e.jsx)(E.Z,{align:"start",children:(0,e.jsx)(N.Z,{span:18,children:(0,e.jsx)(T.Z,n()({},l))})})})]})]})})}))}}]),r}(R.PureComponent),De=Ce,Re=c(78677),_=c(20550),xe=function(C){j()(r,C);var v=k()(r);function r(){return x()(this,r),v.apply(this,arguments)}return S()(r,[{key:"render",value:function(){var o=Object.assign({},(A()(this.props),this.props)),u=o.dataSource;delete o.dataSource,delete o.isMobile;var g=[{type:"Point Cloud",x:"nuScenes",y:60},{type:"Point Cloud",x:"Astyx",y:3},{type:"Point Cloud",x:"SeeingThroughFog",y:30},{type:"Radar Tensor",x:"CARRADA",y:28},{type:"Point Cloud",x:"HawkEye",y:10},{type:"Radar Tensor",x:"Zender",y:13},{type:"Point Cloud",x:"Zender",y:13},{type:"Radar Tensor",x:"RADIATE",y:65},{type:"Point Cloud",x:"AIODrive",y:70},{type:"Radar Tensor",x:"CRUW",y:90},{type:"ADC Signal",x:"RaDICaL",y:88},{type:"Point Cloud",x:"RadarScenes",y:60},{type:"Radar Tensor",x:"RADDet",y:24},{type:"Radar Tensor",x:"FloW",y:6},{type:"Point Cloud",x:"FloW",y:6},{type:"ADC Signal",x:"RADIal",y:7},{type:"Radar Tensor",x:"RADIal",y:7},{type:"Point Cloud",x:"RADIal",y:7},{type:"Point Cloud",x:"VOD",y:21},{type:"Point Cloud",x:"Boreas",y:18},{type:"Point Cloud",x:"TJ4DRadSet",y:60},{type:"Radar Tensor",x:"K-Radar",y:55},{type:"Radar Tensor",x:"aiMotive",y:40},{type:"Point Cloud",x:"WaterScenes",y:66}],h=[],f=[40,.5,13,12,3,11,44,100,400,393,49,10,4,8,8,7,40,35,26,54],p=[60,8,30,28,10,26,65,70,90,88,60,24,12,21,21,18,60,55,40,66];(0,ye.S6)(f,function(t,a){h.push({type:"text",position:[a,p[a]],content:"".concat(t,"k"),style:{textAlign:"center",fontSize:14,fill:"rgba(0,0,0,0.85)"},offsetY:-10})});var i={data:g,isStack:!0,legend:{layout:"horizontal",position:"bottom"},xAxis:{label:{autoRotate:!0,rotate:"100",offset:20}},columnWidthRatio:.5,autoFit:!0,appendPadding:[0,100,10,100],xField:"x",yField:"y",seriesField:"type",columnSize:10,color:["#9C6657","#B2934A","#5F9C6B","#5b8ff9","#5d7092","#e8684a"],label:{content:function(a){var b=parseFloat(a.value);if(b<.05)return(b*100).toFixed(1)+"%"},offset:10},tooltip:{showContent:!0,customItems:function(a){return a},formatter:function(a){return console.log(a),{name:a.type,value:"\u2705"}}},annotations:h},l=[{title:"Id",dataIndex:"key",width:"10px"},{title:"Name",dataIndex:"name",width:"10%",render:function(a,b){return(0,e.jsxs)("div",{children:[(0,e.jsx)("a",{target:"_blank",href:a[1],children:a[0]})," [",(0,e.jsx)("a",{href:"#references",children:a[2]}),"]"]})}},{title:"Year",dataIndex:"year",sorter:function(a,b){return a.year-b.year}},{title:"Task",dataIndex:"task",filters:[{text:"Object Detection",value:"Object Detection"},{text:"Semantic Segmentation",value:"Semantic Segmentation"}],onFilter:function(a,b){return b.task.includes(a)},filterSearch:!0,render:function(a,b){return(0,e.jsx)("span",{children:a.map(function(D){var d="";switch(D){case"Object Detection":d="#1890ff";break;case"Semantic Segmentation":d="#fa541c";break;case"Object Tracking":d="#fa8c16";break;case"Localization":d="#13c2c2";break;case"Planning":d="#52c41a";break;case"Prediction":d="#f5222d";break;case"":d="#722ed1";break;case"":d="#eb2f96";break;case"":d="#722ed1";break;default:d="blue-inverse"}return(0,e.jsx)(_.Z,{color:d,children:D},D)})})}},{title:"Annotation",dataIndex:"annotation",filters:[{text:"2D box-level",value:"2D box-level"},{text:"3D box-level",value:"3D box-level"},{text:"2D pixel-level",value:"2D pixel-level"},{text:"3D point-level",value:"3D point-level"}],onFilter:function(a,b){return b.annotation.includes(a)},filterSearch:!0,render:function(a,b){return(0,e.jsx)("span",{children:a.map(function(D){var d="";switch(D){case"2D box-level":d="#1890ff";break;case"3D box-level":d="#fa541c";break;case"2D pixel-level":d="#fa8c16";break;case"2D point-level":d="#13c2c2";break;default:d="blue-inverse"}return(0,e.jsx)(_.Z,{color:d,children:D},D)})})}},{title:"Radar Data Representation",dataIndex:"radar_data_representation",filters:[{text:"Point Cloud",value:"Point Cloud"},{text:"Range-Doppler Tensor",value:"Range-Doppler Tensor"},{text:"Range-Azimuth Tensor",value:"Range-Azimuth Tensor"},{text:"Range-Azimuth-Doppler Tensor",value:"Range-Azimuth-Doppler Tensor"}],onFilter:function(a,b){return b.radar_data_representation.includes(a)},filterSearch:!0,render:function(a,b){return(0,e.jsx)("span",{children:a.map(function(D){var d="";switch(D){case"Point Cloud":d="#108ee9";break;case"ADC Signal":d="#f50";break;case"Range-Doppler Tensor":d="#2db7f5";case"Range-Azimuth Tensor":d="#2db7f5";case"Range-Azimuth-Doppler Tensor":d="#2db7f5";break;case"Grid Map":d="#87d068";break;default:d="#108ee9"}return(0,e.jsx)(_.Z,{color:d,children:D},D)})})}},{title:"Category Number",dataIndex:"category_number"},{title:"Categories",dataIndex:"categories"},w.Z.EXPAND_COLUMN,{title:"Record Area",dataIndex:"record_area"},{title:"Record Time",dataIndex:"record_time"},{title:"Affiliation",dataIndex:"affiliation"}],m=[{key:"1",name:["nuScenes","https://www.nuscenes.org/nuscenes","1"],year:2019,task:["Object Detection","Object Tracking","Localization","Planning","Prediction"],annotation:["3D box-level"],radar_data_representation:["Point Cloud"],category_number:23,categories:"Vehicle, Pedestrian, Bicycle, Movable Object, Static Object, etc.",size:"1000 scenes, 1.4M boxes, 40k frames, 5.5 hours",scenarios:["A diverse set of locations (urban, residential, nature and industrial), times (day and night)","sun, rain and clouds"],record_area:"Boston, Singapore",record_time:"September 2018",affiliation:"nuTonomy"},{key:"2",name:["Astyx","http://www.astyx.net","2"],year:2019,task:["Object Detection"],annotation:["3D box-level"],radar_data_representation:["Point Cloud"],category_number:7,categories:"Bus, Car, Cyclist, Motorcyclist, Person, Trailer, Truck",size:"500 frames, around 3000 labeled objects",scenarios:["-"],record_area:"South of Germany",record_time:"-",affiliation:"Technical University of Munich"},{key:"3",name:["SeeingThroughFog","https://www.uni-ulm.de/en/in/driveu/projects/dense-datasets/","3"],year:2020,task:["Object Detection"],annotation:["2D box-level","3D box-level"],radar_data_representation:["Point Cloud"],category_number:4,categories:"Passenger Car, Large Vehicle, Pedestrian, Ridable Vehicle",size:"12k samples in real-world driving scenes and 1.5k samples in controlled weather conditions within a fog chamber, 100k objects",scenarios:["Pedestrian zone, residential area, construction area and highway, daytime and street condition","under all weather conditions. Severe weather \u2013 such as snow, heavy rain or fog"],record_area:"Germany, Sweden, Denmark, and Finland",record_time:"February and December 2019",affiliation:"Mercedes-Benz AG"},{key:"4",name:["CARRADA","https://arthurouaknine.github.io/codeanddata/carrada","4"],year:2020,task:["Object Detection","Semantic Segmentation","Object Tracking","Trajectory Prediction"],annotation:["2D box-level","2D pixel-level"],radar_data_representation:["Range-Doppler Tensor","Range-Azimuth Tensor"],category_number:3,categories:"Pedestrian, Car, Cyclist",size:"12,666 frames, 78 instances, 7,139 annotated frames with instances, 23GB synchronized camera and radar views",scenarios:["Urban driving scenarios","adverse weather conditions"],record_area:"Canada",record_time:"-",affiliation:"T\xE9l\xE9com Paris"},{key:"5",name:["HawkEye","https://jguan.page/HawkEye/","5"],year:2020,task:["Semantic Segmentation"],annotation:["3D point-level"],radar_data_representation:["Point Cloud"],category_number:9,categories:"Sub-compact, Compact, Mid-sized, Full-sized, Sports, SUVs, Jeep, Vans, Trucks",size:"3k images, 4k scenes, 120 car models",scenarios:["327 scenes of cars in 3 types of backgrounds: indoor parking garage, outdoor lot, and outdoor house drive-through."],record_area:"-",record_time:"-",affiliation:"University of Illinois at Urbana-Champaign"},{key:"6",name:["Zendar","http://zendar.io/dataset","6"],year:2020,task:["Object Detection","Mapping","Localization"],annotation:["2D box-level"],radar_data_representation:["Range-Doppler Tensor","Range-Azimuth Tensor","Point Cloud"],category_number:1,categories:"Car",size:"Over 11k moving cars labeled in 27 diverse scenes with over 40k automatically generated labels",scenarios:["Complex urban driving scenarios"],record_area:"-",record_time:"-",affiliation:"Zendar"},{key:"7",name:["RADIATE","http://pro.hw.ac.uk/radiate/","7"],year:2020,task:["Object Detection","Object Tracking","SLAM","Scene Understanding"],annotation:["2D box-level"],radar_data_representation:["Range-Azimuth Tensor"],category_number:8,categories:"Car, Van, Bus, Truck, Motorbike, Bicycle, Pedestrian and a group of pedestrians",size:"200k bounding boxes over 44k radar frames",scenarios:["driving scenarios (e.g., parked, urban, motorway and suburban)","a variety of weather conditions (e.g., sun, night, rain, fog and snow)"],record_area:"Edinburgh",record_time:"Between February 2019 and February 2020",affiliation:"Heriot-Watt University"},{key:"8",name:["AIODrive","http://www.aiodrive.org/","8"],year:2020,task:["Object Detection","Object Tracking","Semantic Segmentation","Trajectory Prediction","Depth Estimation"],annotation:["2D box-level","3D box-level"],radar_data_representation:["Point Cloud"],category_number:11,categories:"Vehicle, Pedestrian, Vegetation, Building, Road, Sidewalk, Wall, Traffic Sign, Pole and Fence",size:"500k annotated images for 5 camera viewpoints, 100k annotated frames for radar sensor",scenarios:["Crowded scenes, people running, high-speed driving, violations of the traffic rule, and car accidents.","Adverse weather and lighting."],record_area:"one of eight cities from Carla assets",record_time:"-",affiliation:"Carnegie Mellon University"},{key:"9",name:["CRUW","https://www.cruwdataset.org/","9"],year:2021,task:["Object Detection"],annotation:["2D box-level"],radar_data_representation:["Range-Azimuth Tensor"],category_number:3,categories:"Pedestrian, Cyclist, Car",size:"400K frames, 260K objects, 3.5 hours",scenarios:["Area: parking lot, campus road, city street, and highway. Several vision-fail scenarios where the image qualities are pretty bad, i.e., dark, strong light, blur, etc.","strong/weak lighting condition"],record_area:"-",record_time:"-",affiliation:"University of Washington"},{key:"10",name:["RaDICaL","https://publish.illinois.edu/radicaldata/","10"],year:2021,task:["Object Detection"],annotation:["2D box-level"],radar_data_representation:["ADC Signal"],category_number:2,categories:"Pedestrian, Car",size:"393k frames",scenarios:["Indoor: people, static clutter; outdoor: neighborhood, suburban, highways and city roads."],record_area:"-",record_time:"-",affiliation:"University of Illinois at Urbana-Champaign"},{key:"11",name:["RadarScenes","https://radar-scenes.com/","11"],year:2021,task:["Object Detection","Semantic Segmentation"],annotation:["2D point-level"],radar_data_representation:["Point Cloud"],category_number:11,categories:"Car, Large Vehicle, Truck, Bus, Train, Bicycle, Motorized Two-wheeler, Pedestrian, Pedestrian Group, Animal, and Other",size:"40.208 frames, 158 individual sequences, 118.9M radar points",scenarios:["Inner city, T-junction, commercial area, urban area, country road, road works"],record_area:"Ulm, Germany",record_time:"Between 2016 and 2018",affiliation:"Mercedes-Benz AG, Stuttgart, Germany"},{key:"12",name:["RADDet","https://github.com/ZhangAoCanada/RADDet","12"],year:2021,task:["Object Detection"],annotation:["2D box-level","3D box-level"],radar_data_representation:["Range-Azimuth-Doppler Tensor"],category_number:6,categories:"Person, Bicycle, Car, Motorcycle, Bus, Truck",size:"10,158 frames",scenarios:["Sidewalks","sunny weather conditions"],record_area:"-",record_time:"September to October 2020",affiliation:"University of Ottawa"},{key:"13",name:["FloW","https://github.com/ORCA-Uboat/FloW-Dataset","13"],year:2021,task:["Object Detection"],annotation:["2D box-level"],radar_data_representation:["Range-Doppler Tensor","Point Cloud"],category_number:1,categories:"Bottle",size:"4k frames",scenarios:["Inland water surface"],record_area:"-",record_time:"-",affiliation:"ORCA-Uboat"},{key:"14",name:["RADIal","https://github.com/valeoai/RADIal","14"],year:2021,task:["Object Detection","Semantic Segmentation"],annotation:["2D box-level"],radar_data_representation:["ADC Signal","Range-Azimuth-Doppler Tensor","Range-Azimuth Tensor","Range-Doppler Tensor","Point Cloud"],category_number:1,categories:"Vehicle",size:"8,252 frames are labelled with 9,550 vehicle",scenarios:["City street, highway, countryside road"],record_area:"-",record_time:"-",affiliation:"Valeo.ai, Paris, France"},{key:"15",name:["VoD","https://tudelft-iv.github.io/view-of-delft-dataset/","15"],year:2022,task:["Object Detection"],annotation:["2D box-level","3D box-level"],radar_data_representation:["Point Cloud"],category_number:13,categories:"Car, Pedestrian, Cyclist, Rider, Unused Bicycle, Bicycle Rack, Human Depiction, Moped or Scooter, Motor, Ride Other, Vehicle Other, Truck, Ride Uncertain",size:"8693 frames, 123,106 annotations of both moving and static objects, including 26,587 pedestrian, 10,800 cyclist and 26,949 car labels",scenarios:["Campus, suburb and old-town locations. With a preference for scenarios containing vulnerable road users"],record_area:"City of Delft (The Netherlands)",record_time:"-",affiliation:"TU Delft, The Netherlands"},{key:"16",name:["Boreas","https://www.boreas.utias.utoronto.ca/","16"],year:2022,task:["Object Detection","Localization","Odometry"],annotation:["2D box-level"],radar_data_representation:["Range-Azimuth Tensor"],category_number:4,categories:"Car, Pedestrian, Cyclist, Misc",size:"7.1k frames for detection, over 350km of driving data, 326,180 unique 3D box annotations",scenarios:["a repeated route near the University of Toronto Institute for Aerospace Studies (UTIAS)","various weather conditions (sun, cloud, rain, night, snow) and seasons."],record_area:"University of Toronto Institute for Aerospace Studies (UTIAS)",record_time:"November, 2020 and \uFB01nishing in November, 2021",affiliation:"University of Toronto"},{key:"17",name:["TJ4DRadSet","https://github.com/TJRadarLab/TJ4DRadSet","17"],year:2022,task:["Object Detection","Object Tracking"],annotation:["3D box-level"],radar_data_representation:["Point Cloud"],category_number:8,categories:"Car, Pedestrian, Cyclist, Bus, Motorcyclist, Truck, Engineering Vehicle, Tricyclist",size:"40K frames in total, 7757 frames within 44 consecutive sequences",scenarios:["various driving scenarios, different road types, such as urban roads, elevated roads, industrial zones, etc.","various lighting conditions, such as normal lighting, bright light and darkness, and different road types, such as urban roads, elevated roads, industrial zones, etc. Complex scenarios such as object-dense intersections, and simple scenarios such as one-way streets with a few objects."],record_area:"Suzhou, China",record_time:"Fourth quarter of 2021",affiliation:"Tongji University"},{key:"18",name:["K-Radar","https://github.com/kaist-avelab/k-radar","18"],year:2022,task:["Object Detection","Object Tracking","SLAM"],annotation:["3D box-level"],radar_data_representation:["Range-Azimuth-Doppler Tensor"],category_number:5,categories:"Pedestrian, Motobike, Bicycle, Sedan, Bus or Truck",size:"35K frames of 4D radar tensor",scenarios:["adverse weathers (fog, rain, and snow)","various road structures (urban, suburban roads, alleyways, and highways)."],record_area:"Daejeon of the Republic of Korea",record_time:"-",affiliation:"KAIST"},{key:"19",name:["aiMotive","https://github.com/aimotive/aimotive_dataset","19"],year:2022,task:["Object Detection"],annotation:["3D box-level"],radar_data_representation:["Point cloud"],category_number:14,categories:"Pedestrian, Car, Bus, Truck, Van, Motorcycle, Pickup, Rider, Bicycle, Trailer, Train, Shopping Cart, Other Object",size:"26,583 frames, 425k objects",scenarios:["a diverse set of locations(highway, suburban, urban), times(daytime, night), and weather conditions(sun, cloud, rain, glare).","highway, urban, and suburban areas"],record_area:"California, US; Austria; and Hungary ",record_time:"-",affiliation:"aimotive"},{key:"20",name:["WaterScenes","https://waterscenes.github.io","20"],year:2023,task:["Object Detection","Object Detection","Instance Segmentation","Semantic Segmentation","Free-space Segmentation","Waterline Segmentation","Panoptic Perception"],annotation:["2D box-level","2D pixel-level","2D line-level","3D point-level"],radar_data_representation:["point cloud"],category_number:7,categories:"Pier, Buoy, Sailor, Ship, Boat, Vessel, Kayak",size:"54,120 frames, 200k objects",scenarios:["diverse time conditions (daytime, nightfall, night), lighting conditions (normal, dim, strong), weather conditions (sunny, overcast, rainy, snowy) and waterway conditions (river, lake, canal, moat)"],record_area:"Suzhou, China",record_time:"2022/08-2022/12",affiliation:"XJTLU"}],s=function(a,b,D,d){console.log("params",a,b,D,d)};return(0,e.jsx)("div",n()(n()(n()({},o),u.wrapper),{},{id:"datasets",children:(0,e.jsxs)("div",{className:"title-wrapper",children:[(0,e.jsxs)("div",{className:"chart",children:[(0,e.jsx)("h2",{name:"title",className:"title-h2",children:"Radar-Camera Fusion Datasets"}),(0,e.jsx)(Re.Z,n()(n()({},i),{},{style:{textAlign:"center"}}))]}),(0,e.jsx)("br",{}),(0,e.jsx)("br",{}),(0,e.jsx)("br",{}),(0,e.jsx)(w.Z,{bordered:!0,scroll:{x:"200px"},pagination:{pageSize:10,hideOnSinglePage:!0},columns:l,dataSource:m,onChange:s,expandable:{columnTitle:"Size / Scenarios",expandedRowRender:function(a){return(0,e.jsxs)("p",{style:{margin:0},children:["Size: ",a.size,(0,e.jsx)("br",{}),"Scenarios: ",a.scenarios]})},rowExpandable:function(a){return a.name!=="Not Expandable"}}})]})}))}}]),r}(R.PureComponent),Se=xe,je=function(C){j()(r,C);var v=k()(r);function r(){return x()(this,r),v.apply(this,arguments)}return S()(r,[{key:"render",value:function(){var o=Object.assign({},(A()(this.props),this.props)),u=o.dataSource;delete o.dataSource,delete o.isMobile;var g=[{title:"Id",dataIndex:"key",width:"10px"},{title:"Name",dataIndex:"name",width:"10%",render:function(i,l){var m=i.toString().split(",");return(0,e.jsxs)("div",{children:[m[0]," [",(0,e.jsx)("a",{href:"#references",children:parseInt(m[1].trim())+19}),"]"]})}},{title:"Short Name",dataIndex:"short_name"},{title:"Year",dataIndex:"year",sorter:function(i,l){return i.year-l.year}},{title:"Task",dataIndex:"task",filters:[{text:"Object Detection",value:"Object Detection"},{text:"Semantic Segmentation",value:"Semantic Segmentation"}],onFilter:function(i,l){return l.task.includes(i)},filterSearch:!0,width:"10%",render:function(i,l){var m=i.toString().split("|");console.log(m);var s=[];return m.map(function(t){t=t.trim();var a="";switch(t){case"Object Detection":a="#1890ff";break;case"Semantic Segmentation":a="#fa541c";break;case"Object Tracking":a="#fa8c16";break;case"Localization":a="#13c2c2";break;case"Planning":a="#52c41a";break;case"Prediction":a="#f5222d";break;case"Object Classification":a="#eb2f96";break;default:a="blue-inverse"}s.push((0,e.jsx)(_.Z,{color:a,children:t},t))}),s}},{title:"Annotation",dataIndex:"annotation",filters:[{text:"2D box-level",value:"2D box-level"},{text:"3D box-level",value:"3D box-level"},{text:"2D pixel-level",value:"2D pixel-level"},{text:"3D point-level",value:"3D point-level"}],onFilter:function(i,l){return l.annotation.includes(i)},filterSearch:!0,render:function(i,l){var m=i.toString().split("|"),s=[];return m.map(function(t){t=t.trim();var a="";switch(t){case"2D box-level":a="#1890ff";break;case"3D box-level":a="#fa541c";break;case"2D pixel-level":a="#fa8c16";break;case"2D point-level":a="#13c2c2";break;default:a="blue-inverse"}s.push((0,e.jsx)(_.Z,{color:a,children:t},t))}),s}},{title:"Radar Data Representation",dataIndex:"radar_data_representation",filters:[{text:"Point Cloud",value:"Point Cloud"},{text:"Range-Doppler Tensor",value:"Range-Doppler Tensor"},{text:"Range-Azimuth Tensor",value:"Range-Azimuth Tensor"},{text:"Range-Azimuth-Doppler Tensor",value:"Range-Azimuth-Doppler Tensor"}],onFilter:function(i,l){return l.radar_data_representation.includes(i)},filterSearch:!0,render:function(i,l){var m=i.toString().split("|"),s=[];return m.map(function(t){t=t.trim();var a="";switch(t){case"Point Cloud":a="#108ee9";break;case"ADC Signal":a="#f50";break;case"Range-Doppler Tensor":a="#2db7f5";case"Range-Azimuth Tensor":a="#2db7f5";case"Range-Azimuth-Doppler Tensor":a="#2db7f5";default:a="blue-inverse"}s.push((0,e.jsx)(_.Z,{color:a,children:t},t))}),s}},{title:"Fusion Level",dataIndex:"fusion_level",filters:[{text:"Object Level",value:"Object Level"},{text:"Data Level",value:"Data Level"},{text:"Feature Level",value:"Feature Level"},{text:"Mixed Level",value:"Mixed Level"}],onFilter:function(i,l){return l.fusion_level.includes(i)},filterSearch:!0,render:function(i,l){var m=i.toString().split("|"),s=[];return m.map(function(t){t=t.trim();var a="";switch(t){case"Object Level":a="#1890ff";break;case"Data Level":a="#fa541c";break;case"Feature Level ":a="#fa8c16";break;case"Mixed Level":a="#13c2c2";break;default:a="blue-inverse"}s.push((0,e.jsx)(_.Z,{color:a,children:t},t))}),s}},{title:"Fusion Operation",dataIndex:"fusion_operation",render:function(i,l){for(var m=i.toString().split("|"),s="",t=0;t<m.length;t++)t==0?s=m[t]:s=(0,e.jsxs)("span",{children:[s,(0,e.jsx)("br",{}),m[t]]});return(0,e.jsx)("div",{children:s})}},{title:"Network",dataIndex:"network"},{title:"Projection",dataIndex:"projection"},{title:"Dataset",dataIndex:"dataset",filters:[{text:"nuScenes",value:"nuScenes"}],onFilter:function(i,l){return l.dataset.includes(i)},filterSearch:!0,render:function(i,l){var m=i.toString().split("|"),s=[];return m.map(function(t){s.push((0,e.jsxs)("div",{children:[(0,e.jsx)("span",{children:t}),(0,e.jsx)("br",{})]}))}),s}},{title:"Evaluation Metrics",dataIndex:"evaluation_metrics",render:function(i,l){var m=i.toString().split("|"),s=[];return m.map(function(t){s.push((0,e.jsxs)("div",{children:[(0,e.jsx)("span",{children:t}),(0,e.jsx)("br",{})]}))}),s}},{title:"Conference/Journal",dataIndex:"conference_journal"},{title:"Source Code",dataIndex:"source_code",render:function(i,l){if(console.log(i),i!="-")return(0,e.jsx)("a",{target:"_blank",href:i,children:i})}}],h=[{key:"1",name:"Distant vehicle detection using radar and vision, 1",short_name:"-",year:2019,task:"Object Detection",annotation:"2D box-level",radar_data_representation:"Point Cloud",projection:"Radar point to image plane",fusion_level:"Feature Level",fusion_operation:"Addition | Concatenation",network:"One-stage network based on ResNet",dataset:"Self-Recorded",evaluation_metrics:"AP",conference_journal:"2019 International Conference on Robotics and Automation (ICRA)",source_code:"-"},{key:"2",name:"RRPN: Radar Region Proposal Network for Object Detection in Autonomous Vehicles, 2",short_name:"RRPN",year:2019,task:"Object Detection",annotation:"2D box-level",radar_data_representation:"Point Cloud",projection:"Radar proposal to image plane",fusion_level:"Data Level",fusion_operation:"Transformation matrix",network:"RRPN",dataset:"nuScenes",evaluation_metrics:"AP | AR",conference_journal:"2019 IEEE International Conference on Image Processing (ICIP)",source_code:"https://github.com/mrnabati/RRPN"},{key:"3",name:"Object Detection and Identification using Vision and Radar Data Fusion System for Ground-based Navigation, 3",short_name:"-",year:2019,task:"Object Detection",annotation:"2D box-level",radar_data_representation:"Point Cloud",projection:"Radar point to image plane",fusion_level:"Object Level",fusion_operation:"Transformation matrix",network:"YOLOv3",dataset:"Self-Recorded",evaluation_metrics:"-",conference_journal:"2019 6th International Conference on Signal Processing and Integrated Networks (SPIN)",source_code:"-"},{key:"4",name:"Automotive radar and camera fusion using Generative Adversarial Networks, 4",short_name:"CMGGAN",year:2019,task:"Semantic Segmentation",annotation:"2D point-level",radar_data_representation:"Grid Map",projection:"-",fusion_level:"Feature Level",fusion_operation:"Addition",network:"CMGGAN",dataset:"Self-Recorded",evaluation_metrics:"TP",conference_journal:"Elsevier Computer Vision and Image Understanding",source_code:"-"},{key:"5",name:"Deep Learning Based 3D Object Detection for Automotive Radar and Camera, 5",short_name:"-",year:2019,task:"Object Detection",annotation:"3D box-level",radar_data_representation:"Point Cloud",projection:"Radar point to BEV",fusion_level:"Data Level",fusion_operation:"Transformation matrix",network:"A 3D region proposal network based on VGG",dataset:"Astyx",evaluation_metrics:"AP | PRC",conference_journal:"2019 16th European Radar Conference (EuRAD)",source_code:"-"},{key:"6",name:"RVNet: Deep Sensor Fusion of Monocular Camera and Radar for Image-Based Obstacle Detection in Challenging Environments, 6",short_name:"RVNet",year:2019,task:"Object Detection",annotation:"2D box-level",radar_data_representation:"Point Cloud",projection:"Radar point to image plane",fusion_level:"Feature Level",fusion_operation:"Concatenation",network:"RVNet based on YOLOv3",dataset:"nuScenes",evaluation_metrics:"AP | mAP",conference_journal:"2019 Pacific-Rim Symposium on Image and Video Technology (PSIVT)",source_code:"-"},{key:"7",name:"Radar and Camera Early Fusion for Vehicle Detection in Advanced Driver Assistance Systems, 7",short_name:"FusionNet",year:2019,task:"Object Detection | Object Classification",annotation:"2D box-level",radar_data_representation:"Range-Azimuth Tensor",projection:"Image to BEV",fusion_level:"Feature Level",fusion_operation:"Concatenation",network:"FusionNet inspired by SSD",dataset:"Self-Recorded",evaluation_metrics:"mAP",conference_journal:"33rd Conference on Neural Information Processing Systems",source_code:"-"},{key:"8",name:"SO-Net: Joint Semantic Segmentation and Obstacle Detection Using Deep Fusion of Monocular Camera and Radar, 8",short_name:"SO-Net",year:2020,task:"Object Detection | Semantic Segmentation",annotation:"2D box-level | 2D pixel-level",radar_data_representation:"Point Cloud",projection:"Radar point to image plane",fusion_level:"Feature Level",fusion_operation:"Concatenation",network:"SO-Net based on the RVNet",dataset:"nuScenes",evaluation_metrics:"AP",conference_journal:"2019 Pacific-Rim Symposium on Image and Video Technology (PSIVT)",source_code:"-"},{key:"9",name:"Spatial Attention Fusion for Obstacle Detection Using MmWave Radar and Vision Sensor, 9",short_name:"SAF-FCOS",year:2020,task:"Object Detection",annotation:"2D box-level",radar_data_representation:"Point Cloud",projection:"Radar point to image plane",fusion_level:"Feature Level",fusion_operation:"Addition | Multiplication",network:"SAF based on FCOS",dataset:"nuScenes",evaluation_metrics:"AP",conference_journal:"Sensors",source_code:"https://github.com/Singingkettle/SAF-FCOS"},{key:"10",name:"A Deep learning-based radar and camera sensor fusion architecture for object detection, 10",short_name:"CRF-Net",year:2019,task:"Object Detection",annotation:"2D box-level",radar_data_representation:"Point Cloud",projection:"Radar point to image plane",fusion_level:"Data Level",fusion_operation:"Concatenation",network:"CRF-Net based on RetinaNet",dataset:"nuScenes | Self-Recorded",evaluation_metrics:"mAP",conference_journal:"2019 Sensor Data Fusion: Trends, Solutions, Applications (SDF)",source_code:"https://github.com/TUMFTM/CameraRadarFusionNet"},{key:"11",name:"Seeing Through Fog Without Seeing Fog: Deep Multimodal Sensor Fusion in Unseen Adverse Weather, 11",short_name:"-",year:2020,task:"Object Detection",annotation:"2D box-level",radar_data_representation:"Point Cloud",projection:"Radar point to image plane",fusion_level:"Feature Level",fusion_operation:"Concatenation | Attention",network:"A modified VGG backbone and SSD blocks",dataset:"DENSE",evaluation_metrics:"AP",conference_journal:"2020 CVPR",source_code:"https://github.com/princeton-computational-imaging/SeeingThroughFog"},{key:"12",name:"Radar+RGB Attentive Fusion for Robust Object Detection in Autonomous Vehicles, 12",short_name:"BIRANet",year:2020,task:"Object Detection",annotation:"2D box-level",radar_data_representation:"Point Cloud",projection:"-",fusion_level:"Feature Level",fusion_operation:"Addition",network:"RANet and BIRANet based on ResNet",dataset:"nuScenes",evaluation_metrics:"-",conference_journal:"2020 ICIP",source_code:"https://github.com/RituYadav92/Radar-RGB-Attentive-Multimodal-Object-Detection"},{key:"13",name:"Radar-Camera Sensor Fusion for Joint Object Detection and Distance Estimation in Autonomous Vehicles, 13",short_name:"-",year:2020,task:"Object Detection | Depth Estimation",annotation:"2D box-level",radar_data_representation:"Point Cloud",projection:"Radar proposal to image plane",fusion_level:"Mixed Level",fusion_operation:"-",network:"FPN with ResNet as backbone, and RPN in Faster R-CNN",dataset:"nuScenes",evaluation_metrics:"AP | AR",conference_journal:"2020 IROS",source_code:"-"},{key:"14",name:"YOdar: Uncertainty-based Sensor Fusion for Vehicle Detection with Camera and Radar Sensors, 14",short_name:"YOdar",year:2020,task:"Object Detection",annotation:"2D box-level",radar_data_representation:"Point Cloud",projection:"Radar point to image plane",fusion_level:"Feature Level",fusion_operation:"Concatenation",network:"YOdar based on YOLOv3",dataset:"nuScenes",evaluation_metrics:"AP | mAP",conference_journal:"arXiv",source_code:"-"},{key:"15",name:"CenterFusion: Center-based Radar and Camera Fusion for 3D Object Detection, 15",short_name:"CenterNet",year:2020,task:"Object Detection",annotation:"3D box-level",radar_data_representation:"Point Cloud",projection:"Radar point to image plane",fusion_level:"Feature Level",fusion_operation:"Concatenation",network:"CenterNet with DLA backbone",dataset:"nuScenes",evaluation_metrics:"mAP",conference_journal:"2021 WACV",source_code:"https://github.com/mrnabati/CenterFusion"},{key:"16",name:"RODNet: Radar Object Detection Using Cross-Modal Supervision, 16",short_name:"RODNet",year:2020,task:"Object Detection",annotation:"2D box-level",radar_data_representation:"Range-Azimuth Tensor",projection:"Camera image to radar range-azimuth coordinates",fusion_level:"Feature Level",fusion_operation:"-",network:"RODNet",dataset:"CRUW",evaluation_metrics:"AP | AR | OLS",conference_journal:"2021 WACV",source_code:"https://github.com/yizhou-wang/RODNet"},{key:"17",name:"RAMP-CNN: A Novel Neural Network for Enhanced Automotive Radar Object Recognition, 17",short_name:"RAMP-CNN",year:2021,task:"Object Detection",annotation:"2D box-level",radar_data_representation:"Range-Azimuth-Doppler Tensor",projection:"-",fusion_level:"Feature Level",fusion_operation:"Concatenation",network:"RAMP-CNN",dataset:"CRUW",evaluation_metrics:"-",conference_journal:"IEEE Sensors",source_code:"-"},{key:"18",name:"A Feature Pyramid Fusion Detection Algorithm Based on Radar and Camera Sensor, 18",short_name:"-",year:2021,task:"Object Detection",annotation:"3D box-level",radar_data_representation:"Point Cloud",projection:"Radar point to image plane",fusion_level:"Feature Level",fusion_operation:"Multiplication | Concatenation",network:"A network based on YOLOv3",dataset:"nuScenes",evaluation_metrics:"-",conference_journal:"2020 ICSP",source_code:"-"},{key:"19",name:"Low-level Sensor Fusion Network for 3D Vehicle Detection using Radar Range-Azimuth Heatmap and Monocular Image, 19",short_name:"-",year:2020,task:"Object Detection",annotation:"3D box-level",radar_data_representation:"Range-Azimuth Tensor",projection:"-",fusion_level:"Feature Level",fusion_operation:"Concatenation",network:"A network based on VGG and FPN",dataset:"Self-Recorded",evaluation_metrics:"-",conference_journal:"2020 ACCV",source_code:"-"},{key:"20",name:"Radar Camera Fusion via Representation Learning in Autonomous Driving, 20",short_name:"AssociationNet",year:2021,task:"Object Detection",annotation:"2D box-level",radar_data_representation:"Point Cloud",projection:"Radar point to image plane",fusion_level:"Object Level",fusion_operation:"Transformation matrix | Concatenation",network:"AssociationNet",dataset:"Self-Recorded",evaluation_metrics:"-",conference_journal:"2021 WACV",source_code:"-"},{key:"21",name:"Radar Voxel Fusion for 3D Object Detection, 21",short_name:"RVF-Net",year:2021,task:"Object Detection",annotation:"3D box-level",radar_data_representation:"Point Cloud",projection:"-",fusion_level:"Data Level",fusion_operation:"Concatenation",network:"RVF-Net based on VoxelNet",dataset:"nuScenes",evaluation_metrics:"AP",conference_journal:"MDPI Apply Science",source_code:"-"},{key:"22",name:"3D Detection and Tracking for On-road Vehicles with a Monovision Camera and Dual Low-cost 4D mmWave Radars, 22",short_name:"-",year:2021,task:"Object Detection",annotation:"3D box-level",radar_data_representation:"Point Cloud",projection:"Radar point to image plane | Radar point to BEV",fusion_level:"Mixed Level",fusion_operation:"Concatenation",network:"CNN with SSMA block",dataset:"Astyx",evaluation_metrics:"mAP | Average Heading Similarity (AHS)",conference_journal:"2021 ITSC",source_code:"-"},{key:"23",name:"Robust Small Object Detection on the Water Surface through Fusion of Camera and Millimeter Wave Radar, 23",short_name:"RISFNet",year:2021,task:"Object Detection",annotation:"2D box-level",radar_data_representation:"Point Cloud",projection:"Radar point to image plane",fusion_level:"Feature Level",fusion_operation:"Concatenation | Addition | Multiplication",network:"RISFNet based on CSPdarknet53 and VGG",dataset:"FloW",evaluation_metrics:"-",conference_journal:"2020 ICCV",source_code:"-"},{key:"24",name:"GRIF Net: Gated Region of Interest Fusion Network for Robust 3D Object Detection from Radar Point Cloud and Monocular Image, 24",short_name:"GRIF Net",year:2021,task:"Object Detection",annotation:"3D box-level",radar_data_representation:"Point Cloud",projection:"-",fusion_level:"Feature Level",fusion_operation:"Attention",network:"GRIF Net based on FPN and SBNet",dataset:"nuScenes",evaluation_metrics:"AP",conference_journal:"2020 IROS",source_code:"-"},{key:"25",name:"Fusion Point Pruning for Optimized 2D Object Detection with Radar-Camera Fusion, 25",short_name:"-",year:2021,task:"Object Detection",annotation:"2D box-level",radar_data_representation:"Point Cloud",projection:"Radar point to image plane",fusion_level:"Feature Level",fusion_operation:"Addition | Concatenation",network:"A network based on RetinaNet architecture with a ResNet backbone",dataset:"nuScenes",evaluation_metrics:"-",conference_journal:"2022 WACV",source_code:"-"},{key:"26",name:"A Simple Baseline for BEV Perception Without LiDAR, 26",short_name:"-",year:2021,task:"Semantic Segmentation",annotation:"2D pixel-level",radar_data_representation:"Point Cloud",projection:"Radar point to BEV | Camera image to image plane",fusion_level:"Feature Level",fusion_operation:"Concatenation",network:"A network with a ResNet backbone",dataset:"nuScenes",evaluation_metrics:"IOU",conference_journal:"arXiv",source_code:"-"},{key:"27",name:"RadSegNet: A Reliable Approach to Radar Camera Fusion, 27",short_name:"RadSegNet",year:2022,task:"Object Detection",annotation:"2D box-level | 2D pixel-level",radar_data_representation:"Point Cloud | Range-Azimuth Tensor",projection:"Radar point to BEV | Radar point in 3D Cartesian coordinates",fusion_level:"Data-Level",fusion_operation:"Concatenation",network:"RadSegNet",dataset:"Astyx | RADIATE",evaluation_metrics:"-",conference_journal:"arXiv",source_code:"-"},{key:"28",name:"Bridging the View Disparity of Radar and Camera Features for Multi-modal Fusion 3D Object Detection, 28",short_name:"RCBEV",year:2022,task:"Object Detection",annotation:"3D box-level",radar_data_representation:"Point Cloud",projection:"Radar point to BEV | Image to BEV",fusion_level:"Feature Level",fusion_operation:"Concatenation",network:"RCBEV with Swin Transformer as backbone and FPN as neck",dataset:"nuScenes",evaluation_metrics:"mAP | MTP | NDS",conference_journal:"IEEE Transactions on Intelligent Vehicles",source_code:"-"},{key:"29",name:"CRAFT: Camera-Radar 3D Object Detection with Spatio-Contextual Fusion Transformer, 29",short_name:"CRAFT",year:2022,task:"Object Detection",annotation:"3D box-level",radar_data_representation:"Point Cloud",projection:"-",fusion_level:"Data Level",fusion_operation:"Concatenation",network:"RCBEV with Swin Transformer as backbone and FPN as neck",dataset:"nuScenes",evaluation_metrics:"-",conference_journal:"2023 AAAI",source_code:"-"},{key:"30",name:"DeepFusion: A Robust and Modular 3D Object Detector for Lidars, Cameras and Radar, 30",short_name:"DeepFusion",year:2022,task:"Object Detection",annotation:"3D box-level",radar_data_representation:"Point Cloud",projection:"-",fusion_level:"Feature Level",fusion_operation:"Concatenation",network:"DeepFusion",dataset:"Self-reorded | nuScenes",evaluation_metrics:"-",conference_journal:"2022 IROS",source_code:"-"},{key:"31",name:"CramNet: Camera-Radar Fusion with Ray-Constrained Cross-Attention for Robust 3D Object Detection, 31",short_name:"CramNet",year:2022,task:"Object Detection",annotation:"3D box-level",radar_data_representation:"Range-Azimuth Tensor",projection:"-",fusion_level:"Feature Level",fusion_operation:"Attention",network:"CramNet",dataset:"RADIATE",evaluation_metrics:"-",conference_journal:"2022 ECCV",source_code:"-"},{key:"32",name:"MVFusion: Multi-View 3D Object Detection with Semantic-aligned Radar and Camera Fusion, 32",short_name:"MVFusion",year:2023,task:"Object Detection",annotation:"3D box-level",radar_data_representation:"Point Cloud",projection:"-",fusion_level:"Feature Level",fusion_operation:"Concatenation | Addition",network:"MVFusion",dataset:"nuScenes",evaluation_metrics:"-",conference_journal:"2023 ICRA",source_code:"-"},{key:"33",name:"CRN: Camera Radar Net for Accurate, Robust, Efficient 3D Perception, 33",short_name:"CRN",year:2023,task:"Object Detection",annotation:"3D box-level",radar_data_representation:"Point Cloud",projection:"-",fusion_level:"Feature Level",fusion_operation:"Concatenation",network:"CRN",dataset:"nuScenes",evaluation_metrics:"-",conference_journal:"2023 ICLR",source_code:"-"},{key:"34",name:"RCFusion: Fusing 4D Radar and Camera with Bird's-Eye View Features for 3D Object Detection, 34",short_name:"RCFusion",year:2023,task:"Object Detection",annotation:"3D box-level",radar_data_representation:"Point Cloud",projection:"-",fusion_level:"Feature Level",fusion_operation:"Concatenation | Multiplication | Attention",network:"RCFusion",dataset:"VoD, TJ4DRadSet",evaluation_metrics:"-",conference_journal:"IEEE TIM",source_code:"-"},{key:"35",name:"LXL: LiDAR Exclusive Lean 3D Object Detection with 4D Imaging Radar and Camera Fusion, 35",short_name:"LXL",year:2023,task:"Object Detection",annotation:"3D box-level",radar_data_representation:"Point Cloud",projection:"-",fusion_level:"Feature Level",fusion_operation:"Concatenation | Multiplication",network:"LXL",dataset:"VoD, TJ4DRadSet",evaluation_metrics:"-",conference_journal:"arXiv",source_code:"-"},{key:"36",name:"Achelous: A Fast Unified Water-surface Panoptic Perception Framework based on Fusion of Monocular Camera and 4D mmWave Radar, 36",short_name:"Achelous",year:2023,task:"Object Detection",annotation:"2D box-level",radar_data_representation:"Point Cloud",projection:"-",fusion_level:"Data Level",fusion_operation:"Concatenation",network:"Achelous",dataset:"WaterScenes",evaluation_metrics:"-",conference_journal:"IEEE ITSC",source_code:"https://github.com/GuanRunwei/Achelous"}],f=function(i,l,m,s){console.log("params",i,l,m,s)};return(0,e.jsxs)("div",n()(n()(n()({},o),u.wrapper),{},{id:"methods",children:[(0,e.jsx)("div",{className:"title-wrapper",children:(0,e.jsx)("h2",{name:"title",className:"title-h1",children:"Radar-Camera Fusion Methods"})}),(0,e.jsx)(w.Z,{bordered:!0,scroll:{x:"200px"},columns:g,dataSource:h,onChange:f})]}))}}]),r}(R.PureComponent),ke=je,Ae=function(C){j()(r,C);var v=k()(r);function r(){return x()(this,r),v.apply(this,arguments)}return S()(r,[{key:"render",value:function(){var o=Object.assign({},(A()(this.props),this.props)),u=o.dataSource;delete o.dataSource,delete o.isMobile;var g=["S. Chadwick, W. Maddetn, and P. Newman, \u201CDistant vehicle detection using radar and vision,\u201D Proceedings - IEEE International Conference on Robotics and Automation, vol. 2019-May, pp. 8311\u20148317, 2019.","M. Meyer and G. Kuschk, \u201CAstyx: Automotive radar dataset for deep learning based 3D object detection,\u201D EuRAD 2019 - 2019 16th European Radar Conference, pp. 129\u2014132, 2019.","M. Bijelic, T. Gruber, F. Mannan, F. Kraus, W. Ritter, K. Dietmayer, and F. Heide, \u201CSeeing Through Fog Without Seeing Fog: Deep Multimodal Sensor Fusion in Unseen Adverse Weather,\u201D 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), vol. 00, pp. 11 679\u201311 689, 2020.","A. Ouaknine, A. Newson, J. Rebut, F. Tupin, and P. Perez, \u201CCARRADA dataset: Camera and automotive radar with range-Angle-doppler annotations,\u201D arXiv, 2020.","J. Guan, S. Madani, S. Jog, S. Gupta, and H. Hassanieh, \u201CThrough Fog High-Resolution Imaging Using Millimeter Wave Radar,\u201D ser. Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2020, pp. 11 461\u201411 470.","M. Mostajabi, C. M. Wang, D. Ranjan, and G. Hsyu, \u201CHigh resolution radar dataset for semi-supervised learning of dynamic objects,\u201D IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops, vol. 2020-June, pp. 450\u2014457, 2020.","M. Sheeny, E. De Pellegrin, S. Mukherjee, A. Ahrabian, S. Wang, and A. Wallace, \u201CRadiate: A radar dataset for automotive perception in bad weather,\u201D in 2021 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2021, pp. 1\u20137.","X. Weng, Y. Man, D. Cheng, J. Park, M. O\u2019Toole, and K. Kitani, \u201CAll-In-One Drive: A Large-Scale Comprehensive Perception Dataset with High-Density Long-Range Point Clouds.\u201D","Y. Wang, G. Wang, H.-M. Hsu, H. Liu, and J.-N. Hwang, \u201CRethinking of Radar\u2019s Role: A Camera-Radar Dataset and Systematic Annotator via Coordinate Alignment,\u201D in CVPRW, 2021.","T.-Y. Lim, S. A. Markowitz, and M. N. Do, \u201CRaDICaL: A Synchronized FMCW Radar, Depth, IMU and RGB Camera Data Dataset with Low-Level FMCW Radar Signals.\u201D","O. Schumann, M. Hahn, N. Scheiner, F. Weishaupt, J. F. Tilly, J. Dickmann, and C. Wohler, \u201CRadarScenes: A Real-World Radar Point Cloud Data Set for Automotive Applications,\u201D 2021. [Online]. Available: http://arxiv.org/abs/2104.02493","A. Zhang, F. E. Nowruzi, and R. Laganiere, \u201CRADDet: Range-Azimuth-Doppler based Radar Object Detection for Dynamic Road Users,\u201D 2021 18th Conference on Robots and Vision (CRV), vol. 00, pp. 95\u2013102, 2021.","Y. Cheng, J. Zhu, M. Jiang, J. Fu, C. Pang1, P. Wang1, K. Sankaran3, O. Onabola3, Y. Liu2, D. Liu3, and Y. Bengio3, \u201CFloW: A Dataset and Benchmark for Floating Waste Detection in Inland Waters,\u201D ser. ICCV, 2021.","J. Rebut, A. Ouaknine, W. Malik, and P. Pe \u0301rez, \u201CRaw high-definition radar for multi-task learning,\u201D in Proceedings of the IEEE/CVF Confer- ence on Computer Vision and Pattern Recognition, 2022, pp. 17 021\u2013 17 030.","A. Palffy, E. Pool, S. Baratam, J. Kooij, and D. Gavrila, \u201CMulti-class Road User Detection with 3+1D Radar in the View-of-Delft Dataset,\u201D IEEE Robotics and Automation Letters, vol. PP, no. 99, pp. 1\u20131, 2022.","K. Burnett, D. J. Yoon, Y. Wu, A. Z. Li, H. Zhang, S. Lu, J. Qian, W.-K. Tseng, A. Lambert, K. Y. K. Leung, A. P. Schoellig, and T. D. Barfoot, \u201CBoreas: A Multi-Season Autonomous Driving Dataset,\u201D arXiv, 2022.","A. Palffy, E. Pool, S. Baratam, J. Kooij, and D. Gavrila, \u201CMulti-class Road User Detection with 3+1D Radar in the View-of-Delft Dataset,\u201D IEEE Robotics and Automation Letters, vol. PP, no. 99, pp. 1\u20131, 2022.","D.-H. Paek, S.-H. Kong, and K. T. Wijaya, \u201CK-Radar: 4D Radar Object Detection Dataset and Benchmark for Autonomous Driving in Various Weather Conditions,\u201D arXiv, 2022.","T.Matuszka,I.Barton,A \u0301.Butykai,P.Hajas,D.Kiss,D.Kova \u0301cs, S. Kunsa \u0301gi-Ma \u0301te \u0301, P. Lengyel, G. Ne \u0301meth, L. Peto \u030B et al., \u201Caimotive dataset: A multimodal dataset for robust autonomous driving with long-range perception,\u201D arXiv preprint arXiv:2211.09445, 2022.","Yao S, Guan R, Wu Z, et al. Waterscenes: A multi-task 4d radar-camera fusion dataset and benchmark for autonomous driving on water surfaces[J]. arXiv preprint arXiv:2307.06505, 2023."],h=["S. Chadwick, W. Maddern, and P. Newman, \u201CDistant vehicle detection using radar and vision,\u201D in 2019 International Conference on Robotics and Automation (ICRA). IEEE, 2019, pp. 8311\u20138317.","R. Nabati and H. Qi, \u201CRrpn: Radar region proposal network for object detection in autonomous vehicles,\u201D in 2019 IEEE International Conference on Image Processing (ICIP). IEEE, 2019, pp. 3093\u20133097.","H. Jha, V. Lodhi, and D. Chakravarty, \u201CObject Detection and Identification Using Vision and Radar Data Fusion System for Ground-Based Navigation,\u201D 2019 6th International Conference on Signal Processing and Integrated Networks (SPIN), vol. 00, pp. 590\u2013593, 2019.","V. Lekic and Z. Babic, \u201CAutomotive radar and camera fusion using generative adversarial networks,\u201D Computer Vision and Image Under- standing, vol. 184, pp. 1\u20138, 2019.","M. Meyer and G. Kuschk, \u201CDeep learning based 3d object detection for automotive radar and camera,\u201D in 2019 16th European Radar Conference (EuRAD). IEEE, 2019, pp. 133\u2013136.","V. John and S. Mita, \u201CRvnet: Deep sensor fusion of monocular camera and radar for image-based obstacle detection in challenging environments,\u201D in Image and Video Technology: 9th Pacific-Rim Sym- posium, PSIVT 2019, Sydney, NSW, Australia, November 18\u201322, 2019, Proceedings 9. Springer, 2019, pp. 351\u2013364.","T.-Y. Lim, A. Ansari, B. Major, D. Fontijne, M. Hamilton, R. Gowaikar, and S. Subramanian, \u201CRadar and camera early fusion for vehicle detection in advanced driver assistance systems,\u201D in Machine learning for autonomous driving workshop at the 33rd conference on neural information processing systems, vol. 2, 2019, p. 7.","V. John, M. K. Nithilan, S. Mita, H. Tehrani, R. S. Sudheesh, and P. P. Lalu, \u201CSO-Net: Joint Semantic Segmentation and Obstacle Detection Using Deep Fusion of Monocular Camera and Radar,\u201D Lecture Notes in Computer Science, pp. 138\u2013148, 2020.","S. Chang, Y. Zhang, F. Zhang, X. Zhao, S. Huang, Z. Feng, and Z. Wei, \u201CSpatial attention fusion for obstacle detection using mmwave radar and vision sensor,\u201D Sensors, vol. 20, no. 4, p. 956, 2020.","F. Nobis, M. Geisslinger, M. Weber, J. Betz, and M. Lienkamp, \u201CA Deep learning-based radar and camera sensor fusion architecture for object detection,\u201D arXiv, 2020.","M. Bijelic, T. Gruber, F. Mannan, F. Kraus, W. Ritter, K. Dietmayer, and F. Heide, \u201CSeeing through fog without seeing fog: Deep multimodal sensor fusion in unseen adverse weather,\u201D in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020, pp. 11 682\u201311 692.","R. Yadav, A. Vierling, and K. Berns, \u201CRadar+ rgb attentive fusion for robust object detection in autonomous vehicles,\u201D arXiv preprint arXiv:2008.13642, 2020.","R. Nabati and H. Qi, \u201CRadar-camera sensor fusion for joint object detection and distance estimation in autonomous vehicles,\u201D arXiv preprint arXiv:2009.08428, 2020","K. Kowol, M. Rottmann, S. Bracke, and H. Gottschalk, \u201CYodar: uncertainty-based sensor fusion for vehicle detection with camera and radar sensors,\u201D arXiv preprint arXiv:2010.03320, 2020.","Y. Wang, Z. Jiang, X. Gao, J.-N. Hwang, G. Xing, and H. Liu, \u201CRODNet: Radar Object Detection using Cross-Modal Supervision,\u201D 2021 IEEE Winter Conference on Applications of Computer Vision(WACV), vol. 00, pp. 504\u2013513, 2021.","X. Gao, G. Xing, S. Roy, and H. Liu, \u201CRAMP-CNN: A Novel Neural Network for Enhanced Automotive Radar Object Recognition,\u201D IEEE Sensors Journal, vol. 21, no. 4, pp. 5119\u20145132, 2021.","L.-q. Li and Y.-l. Xie, \u201CA Feature Pyramid Fusion Detection Algorithm Based on Radar and Camera Sensor,\u201D 2020 15th IEEE International Conference on Signal Processing (ICSP), vol. 1, pp. 366\u2013370, 2020.","J. Kim, Y. Kim, and D. Kum, \u201CLow-level Sensor Fusion for 3D Vehicle Detection using Radar Range-Azimuth Heatmap and Monocular Image,\u201D Lecture Notes in Computer Science, pp. 388\u2013402, 2021.","X. Dong, B. Zhuang, Y. Mao, and L. Liu, \u201CRadar Camera Fusion via Representation Learning in Autonomous Driving,\u201D 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), vol. 00, pp. 1672\u20131681, 2021.","F. Nobis, E. Shafiei, P. Karle, J. Betz, and M. Lienkamp, \u201CRadar Voxel Fusion for 3D Object Detection,\u201D Applied Sciences, vol. 11, no. 12, p. 5598, 2021.","H. Cui, J. Wu, J. Zhang, G. Chowdhary, and W. R. Norris, \u201C3D Detection and Tracking for On-road Vehicles with a Monovision Camera and Dual Low-cost 4D mmWave Radars,\u201D 2021 IEEE International Intelligent Transportation Systems Conference (ITSC), vol. 00, pp. 2931\u20132937, 2021.","Y. Cheng, H. Xu, and Y. Liu, \u201CRobust Small Object Detection on the Water Surface through Fusion of Camera and Millimeter Wave Radar,\u201D ser. ICCV, 2021.","Y. Kim, J. W. Choi, and D. Kum, \u201CGRIF Net: Gated Region of Interest Fusion Network for Robust 3D Object Detection from Radar Point Cloud and Monocular Image,\u201D 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), vol. 00, pp. 10 857\u201310 864, 2021.","L. Sta \u0308cker, P. Heidenreich, J. Rambach, and D. Stricker, \u201CFusion Point Pruning for Optimized 2D Object Detection with Radar-Camera Fusion,\u201D 2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), vol. 00, pp. 1275\u20131282, 2022.","A. W. Harley, Z. Fang, J. Li, R. Ambrus, and K. Fragkiadaki, \u201CA Simple Baseline for BEV Perception Without LiDAR,\u201D arXiv, 2022.","K. Bansal, K. Rungta, and D. Bharadia, \u201CRadSegNet: A Reliable Approach to Radar Camera Fusion,\u201D arXiv, 2022.","T. Zhou, J. Chen, Y. Shi, K. Jiang, M. Yang, and D. Yang, \u201CBridging the view disparity between radar and camera features for multi-modal fusion 3d object detection,\u201D IEEE Transactions on Intelligent Vehicles, vol. 8, no. 2, pp. 1523\u20131535, 2023.","Y. Kim, S. Kim, J. W. Choi, and D. Kum, \u201CCRAFT: Camera-Radar 3D Object Detection with Spatio-Contextual Fusion Transformer,\u201D arXiv, 2022.","F. Drews, D. Feng, F. Faion, L. Rosenbaum, M. Ulrich, and C. Gla \u0308ser, \u201CDeepfusion: A robust and modular 3d object detector for lidars, cameras and radars,\u201D in 2022 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2022, pp. 560\u2013567.","J.-J. Hwang, H. Kretzschmar, J. Manela, S. Rafferty, N. Armstrong- Crews, T. Chen, and D. Anguelov, \u201CCramnet: Camera-radar fusion with ray-constrained cross-attention for robust 3d object detection,\u201D in Computer Vision\u2013ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23\u201327, 2022, Proceedings, Part XXXVIII. Springer, 2022, pp. 388\u2013405.","Z. Wu, G. Chen, Y. Gan, L. Wang, and J. Pu, \u201CMvfusion: Multi-view 3d object detection with semantic-aligned radar and camera fusion,\u201D arXiv preprint arXiv:2302.10511, 2023.","Y. Kim, S. Kim, J. Shin, J. W. Choi, and D. Kum, \u201CCrn: Camera radar net for accurate, robust, efficient 3d perception,\u201D arXiv preprint arXiv:2304.00670, 2023.","Zheng L, Li S, Tan B, et al. RCFusion: Fusing 4D Radar and Camera with Bird\u2019s-Eye View Features for 3D Object Detection[J]. IEEE Transactions on Instrumentation and Measurement, 2023.","Xiong W, Liu J, Huang T, et al. LXL: LiDAR Exclusive Lean 3D Object Detection with 4D Imaging Radar and Camera Fusion[J]. arXiv preprint arXiv:2307.00724, 2023.","Guan R, Yao S, Zhu X, et al. Achelous: A Fast Unified Water-surface Panoptic Perception Framework based on Fusion of Monocular Camera and 4D mmWave Radar[J]. arXiv preprint arXiv:2307.07102, 2023."],f=g.map(function(i,l){return(0,e.jsxs)("p",{children:["[",l+1,"] ",i]})}),p=h.map(function(i,l){return(0,e.jsxs)("p",{children:["[",l+1+g.length,"] ",i]})});return(0,e.jsxs)("div",{className:"home-page-wrapper content12-wrapper",id:"references",children:[(0,e.jsxs)("div",{className:"content12",style:{height:"400px",overflowY:"scroll"},children:[(0,e.jsx)("h1",{name:"title",class:"title-h1",children:"References"}),(0,e.jsxs)("span",{children:[f,p]})]}),(0,e.jsxs)("div",{className:"content12",id:"citation",children:[(0,e.jsx)("h1",{name:"title",class:"title-h1",children:"Citation"}),(0,e.jsx)("div",{style:{backgroundColor:"#f3f6fa",padding:"10px"},children:(0,e.jsxs)("code",{children:["@misc{yao2023radarcamera,",(0,e.jsx)("br",{}),"\xA0\xA0\xA0\xA0","title={Radar-Camera Fusion for Object Detection and Semantic Segmentation in Autonomous Driving: A Comprehensive Review}, ",(0,e.jsx)("br",{}),"\xA0\xA0\xA0\xA0","author={Shanliang Yao and Runwei Guan and Xiaoyu Huang and Zhuoxiao Li and Xiangyu Sha and Yong Yue and Eng Gee Lim and Hyungjoon Seo and Ka Lok Man and Xiaohui Zhu and Yutao Yue},",(0,e.jsx)("br",{}),"\xA0\xA0\xA0\xA0","year={2023},",(0,e.jsx)("br",{}),"\xA0\xA0\xA0\xA0","eprint={2304.10410},",(0,e.jsx)("br",{}),"\xA0\xA0\xA0\xA0","archivePrefix={arXiv},",(0,e.jsx)("br",{}),"\xA0\xA0\xA0\xA0","primaryClass={cs.CV}",(0,e.jsx)("br",{}),"}"]})}),(0,e.jsxs)("h3",{name:"title",class:"title-h1",style:{margin:"10px 0 0 0"},children:["Awesome Radar-Camera Fusion: ",(0,e.jsx)("a",{target:"_blank",href:"https://github.com/Radar-Camera-Fusion/Awesome-Radar-Camera-Fusion",children:"https://github.com/Radar-Camera-Fusion/Awesome-Radar-Camera-Fusion"})]})]})]})}}]),r}(R.PureComponent),_e=Ae,Pe=function(C){j()(r,C);var v=k()(r);function r(){return x()(this,r),v.apply(this,arguments)}return S()(r,[{key:"render",value:function(){var o=Object.assign({},(A()(this.props),this.props)),u=o.dataSource;return delete o.dataSource,delete o.isMobile,(0,e.jsx)(B(),n()(n()(n()({},o),u.OverPack),{},{children:(0,e.jsx)(z.Z,n()(n()({type:"bottom",leaveReverse:!0,delay:[0,100]},u.titleWrapper),{},{children:u.titleWrapper.children.map(U)}),"page")}))}}]),r}(R.PureComponent),we=Pe,Ne=function(C){j()(r,C);var v=k()(r);function r(){return x()(this,r),v.apply(this,arguments)}return S()(r,[{key:"render",value:function(){var o=o||[];(function(){var h=document.createElement("script");h.src="https://hm.baidu.com/hm.js?58d144a733fcb2ea441a68157d15c700";var f=document.getElementsByTagName("script")[0];f.parentNode.insertBefore(h,f)})();var u=Object.assign({},(A()(this.props),this.props)),g=u.dataSource;return delete u.dataSource,delete u.isMobile,(0,e.jsx)("div",n()(n()(n()({},u),g.wrapper),{},{children:(0,e.jsx)(B(),n()(n()({},g.OverPack),{},{children:(0,e.jsx)(M.ZP,n()(n()({animation:{y:"+=30",opacity:0,type:"from"}},g.copyright),{},{children:g.copyright.children}),"footer")}))}))}}]),r}(R.PureComponent),Fe=Ne,Oe=c.p+"static/logo.a4911f79.png",Ie={isScrollLink:!0,wrapper:{className:"header2 home-page-wrapper jrhtw9ph4a-editor_css"},page:{className:"home-page"},logo:{className:"header2-logo",children:Oe},LinkMenu:{className:"header2-menu",children:[{name:"linkNav",to:"characteristics",children:"Characteristics",className:"menu-item"},{name:"linkNav",to:"datasets",children:"Datasets",className:"menu-item"},{name:"linkNav",to:"methods",children:"Methods",className:"menu-item"},{name:"linkNav",to:"citation",children:"Citation",className:"menu-item"},{name:"linkNav",to:"https://github.com/Radar-Camera-Fusion/Awesome-Radar-Camera-Fusion",children:"Github",className:"menu-item"}]},mobileMenu:{className:"header2-mobile-menu"},Menu:{children:[{name:"Banner3_0",to:"Banner3_0",children:"\u9996\u9875",className:"active menu-item"},{name:"Content8_0",to:"Content8_0",children:"\u7279\u9080\u5609\u5BBE",className:"menu-item"},{name:"Content9_0",to:"Content9_0",children:"\u4F1A\u8BAE\u65E5\u7A0B",className:"menu-item"},{name:"Content10_0",to:"Content10_0",children:"\u5927\u4F1A\u5730\u5740",className:"menu-item"},{name:"Content11_0",to:"Content11_0",children:"\u5C55\u53F0\u5C55\u793A",className:"menu-item"},{name:"Content12_0",to:"Content12_0",children:"\u7279\u522B\u9E23\u8C22",className:"menu-item"}]}},Me={wrapper:{className:"banner3"},textWrapper:{className:"banner3-text-wrapper",children:[{name:"slogan",className:"banner3-slogan",children:"Radar-Camera Fusion for Object Detection and Semantic Segmentation in Autonomous Driving: A Comprehensive Review"},{name:"nameEn",className:"banner3-name-en",children:"Shanliang Yao, Runwei Guan, Xiaoyu Huang, Zhuoxiao Li, Xiangyu Sha, "},{name:"nameEn",className:"banner3-name-en",children:"Yong Yue, Eng Gee Lim, Hyungjoon Seo, Ka Lok Man, Xiaohui Zhu, Yutao Yue"},{name:"time",className:"banner3-time",children:"University of Liverpool, Xi\u2018an Jiaotong-Liverpool University, Institute of Deep Perception Technology, JITRI"}]}},Te={OverPack:{className:"home-page-wrapper content13-wrapper",playScale:.3},titleWrapper:{className:"title-wrapper",children:[]}},Ee={wrapper:{className:"home-page-wrapper footer0-wrapper"},OverPack:{className:"home-page footer0",playScale:.01},copyright:{className:"copyright",children:(0,e.jsxs)("span",{children:["\xA92023 ",(0,e.jsx)("a",{href:"https://github.com/XJTLU-VEC",children:"XJTLU-VEC"})," All Rights Reserved"]})}},J={wrapper:{className:"home-page-wrapper"},OverPack:{className:"home-page",playScale:.05},copyright:{className:"copyright",children:(0,e.jsxs)("span",{children:["\xA92018 ",(0,e.jsx)("a",{href:"https://motion.ant.design",children:"Ant Motion"})," All Rights Reserved"]})}},Le={wrapper:{className:"home-page-wrapper"},OverPack:{className:"home-page",playScale:.05},copyright:{className:"copyright",children:(0,e.jsxs)("span",{children:["\xA92018 ",(0,e.jsx)("a",{href:"https://motion.ant.design",children:"Ant Motion"})," All Rights Reserved"]})}},Ve={wrapper:{className:"home-page-wrapper content12-wrapper"},OverPack:{className:"home-page content12",playScale:.05}},Ke={wrapper:{className:"home-page-wrapper pricing2-wrapper"},page:{className:"home-page pricing2"},OverPack:{playScale:.3,className:"pricing2-content-wrapper"},titleWrapper:{className:"pricing2-title-wrapper",children:[{name:"title",children:"Comparison of Different Sensors",className:"pricing2-title-h1"}]},Table:{name:"tabsTitle",size:"default",className:"pricing2-table",columns:{children:[{dataIndex:"name",key:"name",name:"empty",childWrapper:{children:[{name:"name",children:" "},{name:"content",children:" "}]}},{dataIndex:"free",key:"free",name:"free",childWrapper:{className:"pricing2-table-name-block",children:[{name:"name",className:"pricing2-table-name",children:(0,e.jsx)("span",{children:(0,e.jsxs)("p",{children:[(0,e.jsx)("span",{children:"Camera"}),(0,e.jsx)("br",{})]})})}]}},{dataIndex:"basic",key:"basic",name:"basic",childWrapper:{className:"pricing2-table-name-block",children:[{name:"name",className:"pricing2-table-name",children:(0,e.jsx)("span",{children:(0,e.jsx)("span",{children:(0,e.jsx)("p",{children:"Radar"})})})}]}},{dataIndex:"pro",key:"pro",name:"pro",childWrapper:{className:"pricing2-table-name-block",children:[{name:"name",className:"pricing2-table-name",children:(0,e.jsx)("span",{children:(0,e.jsx)("p",{children:"LiDAR"})})}]}}]},dataSource:{children:[{name:"list0",children:[{className:"pricing2-table-content-name",name:"name",children:"Color, Texture, Shape"},{name:"content1",children:"images/start-fill.svg",className:"pricing2-table-content"},{children:"Unlimited",name:"content1",className:"pricing2-table-content"},{children:"Unlimited",name:"content2",className:"pricing2-table-content"},{children:"Unlimited",name:"content3",className:"pricing2-table-content"}]},{name:"list1",children:[{className:"pricing2-table-content-name",name:"name",children:"Range Measurement"},{children:"Limited",name:"content0",className:"pricing2-table-content"},{children:"Unlimited",name:"content1",className:"pricing2-table-content"},{children:"Unlimited",name:"content2",className:"pricing2-table-content"},{children:"Unlimited",name:"content3",className:"pricing2-table-content"}]},{name:"list2",children:[{className:"pricing2-table-content-name",name:"name",children:"Velocity Measurement"},{name:"content0",children:"50GB",className:"pricing2-table-content"},{name:"content1",children:"250GB",className:"pricing2-table-content"},{name:"content2",children:"600GB",className:"pricing2-table-content"},{name:"content3",children:"Unlimited",className:"pricing2-table-content"}]},{name:"list3",children:[{className:"pricing2-table-content-name",name:"name",children:"Lighting Robustness"},{children:"-",name:"content0",className:"pricing2-table-content"},{name:"content1",children:"https://gw.alipayobjects.com/zos/basement_prod/14ce3060-34e6-4b30-9a45-1a6b95542310.svg",className:"pricing2-table-content"},{name:"content2",children:"https://gw.alipayobjects.com/zos/basement_prod/14ce3060-34e6-4b30-9a45-1a6b95542310.svg",className:"pricing2-table-content"},{name:"content3",children:"https://gw.alipayobjects.com/zos/basement_prod/14ce3060-34e6-4b30-9a45-1a6b95542310.svg",className:"pricing2-table-content"}]},{name:"list4",children:[{className:"pricing2-table-content-name",name:"name",children:"Weather Robustness"},{name:"content0",children:"-",className:"pricing2-table-content"},{name:"content1",children:"-",className:"pricing2-table-content"},{name:"content2",children:"https://gw.alipayobjects.com/zos/basement_prod/14ce3060-34e6-4b30-9a45-1a6b95542310.svg",className:"pricing2-table-content"},{name:"content3",children:"https://gw.alipayobjects.com/zos/basement_prod/14ce3060-34e6-4b30-9a45-1a6b95542310.svg",className:"pricing2-table-content"}]},{name:"list5",children:[{className:"pricing2-table-content-name",name:"name",children:"Classification Ability"},{name:"content0",children:"-",className:"pricing2-table-content"},{name:"content1",children:"-",className:"pricing2-table-content"},{name:"content2",children:"-",className:"pricing2-table-content"},{name:"content3",children:"https://gw.alipayobjects.com/zos/basement_prod/14ce3060-34e6-4b30-9a45-1a6b95542310.svg",className:"pricing2-table-content"}]},{name:"list5",children:[{className:"pricing2-table-content-name",name:"name",children:"3D Perception"},{name:"content0",children:"-",className:"pricing2-table-content"},{name:"content1",children:"-",className:"pricing2-table-content"},{name:"content2",children:"-",className:"pricing2-table-content"},{name:"content3",children:"https://gw.alipayobjects.com/zos/basement_prod/14ce3060-34e6-4b30-9a45-1a6b95542310.svg",className:"pricing2-table-content"}]},{name:"list5",children:[{className:"pricing2-table-content-name",name:"name",children:"System Cost"},{name:"content0",children:"-",className:"pricing2-table-content"},{name:"content1",children:"-",className:"pricing2-table-content"},{name:"content2",children:"-",className:"pricing2-table-content"},{name:"content3",children:"https://gw.alipayobjects.com/zos/basement_prod/14ce3060-34e6-4b30-9a45-1a6b95542310.svg",className:"pricing2-table-content"}]}]}}},Z;(0,V.ac)(function(C){Z=C});var We=typeof window!="undefined"?window:{},X=We.location,H=X===void 0?{}:X,ze=function(C){j()(r,C);var v=k()(r);function r(y){var o;return x()(this,r),o=v.call(this,y),o.state={isMobile:Z,show:!H.port},o}return S()(r,[{key:"componentDidMount",value:function(){var o=this;(0,V.ac)(function(u){o.setState({isMobile:!!u})}),H.port&&setTimeout(function(){o.setState({show:!0})},500)}},{key:"render",value:function(){var o=this,u=[(0,e.jsx)(ie,{id:"Nav0_0",dataSource:Ie,isMobile:this.state.isMobile},"Nav0_0"),(0,e.jsx)(ue,{id:"Banner3_0",dataSource:Me,isMobile:this.state.isMobile},"Banner3_0"),(0,e.jsx)(De,{id:"Dataset0_0",dataSource:J,isMobile:this.state.isMobile},"Dataset0_0"),(0,e.jsx)(Se,{id:"Dataset0_0",dataSource:J,isMobile:this.state.isMobile},"Dataset0_0"),(0,e.jsx)(ke,{id:"Fusion0_0",dataSource:Le,isMobile:this.state.isMobile},"Fusion0_0"),(0,e.jsx)(_e,{id:"Fusion0_0",dataSource:Ve,isMobile:this.state.isMobile},"Fusion0_0"),(0,e.jsx)(we,{id:"Content13_0",dataSource:Te,isMobile:this.state.isMobile},"Content13_0"),(0,e.jsx)(Fe,{id:"Footer0_0",dataSource:Ee,isMobile:this.state.isMobile},"Footer0_0")];return(0,e.jsx)("div",{className:"templates-wrapper",ref:function(h){o.dom=h},children:this.state.show&&u})}}]),r}(R.Component)}}]);