diff --git a/index.json b/index.json
index db08eb3..807e15a 100644
--- a/index.json
+++ b/index.json
@@ -69,4 +69,4 @@
- [{"authors":["jae wan"],"categories":null,"content":"","date":1717113600,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1717113600,"objectID":"6ef01836a0fda1169e3f8c3cbb6adb51","permalink":"http://localhost:1313/author/jae-wan-park/","publishdate":"2024-05-31T00:00:00Z","relpermalink":"/author/jae-wan-park/","section":"authors","summary":"","tags":null,"title":"Jae Wan Park","type":"authors"},{"authors":["jinyeong"],"categories":null,"content":"","date":1716940800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1716940800,"objectID":"4bd6e37cc21ca9840dece27b03aab4eb","permalink":"http://localhost:1313/author/jinyeong-kim/","publishdate":"2024-05-29T00:00:00Z","relpermalink":"/author/jinyeong-kim/","section":"authors","summary":"","tags":null,"title":"Jinyeong Kim","type":"authors"},{"authors":["dayun"],"categories":null,"content":"","date":1714521600,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1714521600,"objectID":"a1c3fca72509f7369afe1ae0d0d66b3c","permalink":"http://localhost:1313/author/hyunkyung-kwon/","publishdate":"2024-05-01T00:00:00Z","relpermalink":"/author/hyunkyung-kwon/","section":"authors","summary":"","tags":null,"title":"Hyunkyung Kwon","type":"authors"},{"authors":["jiwoo"],"categories":null,"content":"","date":1709251200,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1709251200,"objectID":"36ff167c0dda1f64f8379f3c4ab0b0a9","permalink":"http://localhost:1313/author/jiwoo-park/","publishdate":"2024-03-01T00:00:00Z","relpermalink":"/author/jiwoo-park/","section":"authors","summary":"","tags":null,"title":"Jiwoo Park","type":"authors"},{"authors":["suhyun"],"categories":null,"content":"","date":1705881600,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1705881600,"objectID":"1cb8c53dd729e6257464d64e5e30b8c2","permalink":"http://localhost:1313/author/suhyun-kim/","publishdate":"2024-01-22T00:00:00Z","relpermalink":"/author/suhyun-kim/","section":"authors","summary":"","tags":null,"title":"Suhyun Kim","type":"authors"},{"authors":["jinyeong"],"categories":null,"content":"","date":1704067200,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1704067200,"objectID":"dbad4d12ff85d584209761f88fcb9051","permalink":"http://localhost:1313/author/chanyong-yoon/","publishdate":"2024-01-01T00:00:00Z","relpermalink":"/author/chanyong-yoon/","section":"authors","summary":"","tags":null,"title":"Chanyong Yoon","type":"authors"},{"authors":["dayun"],"categories":null,"content":"","date":1704067200,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1704067200,"objectID":"2488f997c890bd559820a9767ca734e9","permalink":"http://localhost:1313/author/tae-eun-choi/","publishdate":"2024-01-01T00:00:00Z","relpermalink":"/author/tae-eun-choi/","section":"authors","summary":"","tags":null,"title":"Tae Eun Choi","type":"authors"},{"authors":["junhyeok"],"categories":null,"content":"… is a professor of artificial intelligence at the Stanford AI Lab. His research interests include distributed robotics, mobile computing and programmable matter. He leads the Robotic Neurobiology group, which develops self-reconfiguring robots, systems of self-organizing robots, and mobile sensor networks.\nLorem ipsum dolor sit amet, consectetur adipiscing elit. Sed neque elit, tristique placerat feugiat ac, facilisis vitae arcu. Proin eget egestas augue. Praesent ut sem nec arcu pellentesque aliquet. 
Duis dapibus diam vel metus tempus vulputate.\n","date":1698796800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1698796800,"objectID":"0a8c118bd2666ffea4cc0e48ab8c6969","permalink":"http://localhost:1313/author/junhyeok-kim/","publishdate":"2023-11-01T00:00:00Z","relpermalink":"/author/junhyeok-kim/","section":"authors","summary":"… is a professor of artificial intelligence at the Stanford AI Lab. His research interests include distributed robotics, mobile computing and programmable matter. He leads the Robotic Neurobiology group, which develops self-reconfiguring robots, systems of self-organizing robots, and mobile sensor networks.","tags":null,"title":"Junhyeok Kim","type":"authors"},{"authors":["dayun"],"categories":null,"content":"","date":1693785600,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1693785600,"objectID":"09510750f6aa7cb670194d983a945aff","permalink":"http://localhost:1313/author/sujung-hong/","publishdate":"2023-09-04T00:00:00Z","relpermalink":"/author/sujung-hong/","section":"authors","summary":"","tags":null,"title":"Sujung Hong","type":"authors"},{"authors":["dayun"],"categories":null,"content":"","date":1693785600,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1693785600,"objectID":"d6770fe491922eab5700c553acfa2414","permalink":"http://localhost:1313/author/youngjun-jun/","publishdate":"2023-09-04T00:00:00Z","relpermalink":"/author/youngjun-jun/","section":"authors","summary":"","tags":null,"title":"Youngjun Jun","type":"authors"},{"authors":["dayun"],"categories":null,"content":"","date":1678665600,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1678665600,"objectID":"4a50e6146f083cb942b43d3f3d0dda8a","permalink":"http://localhost:1313/author/chanyoung-kim/","publishdate":"2023-03-13T00:00:00Z","relpermalink":"/author/chanyoung-kim/","section":"authors","summary":"","tags":null,"title":"Chanyoung Kim","type":"authors"},{"authors":["dayun"],"categories":null,"content":"","date":1678665600,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1678665600,"objectID":"22aa61c95c3e9abdb51802ad21a0fe73","permalink":"http://localhost:1313/author/dayun-ju/","publishdate":"2023-03-13T00:00:00Z","relpermalink":"/author/dayun-ju/","section":"authors","summary":"","tags":null,"title":"Dayun Ju","type":"authors"},{"authors":["jaehoon"],"categories":null,"content":"","date":1669852800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1669852800,"objectID":"665c1fe62ac64673fa6cf51b7804422b","permalink":"http://localhost:1313/author/jaehoon-joo/","publishdate":"2022-12-01T00:00:00Z","relpermalink":"/author/jaehoon-joo/","section":"authors","summary":"","tags":null,"title":"Jaehoon Joo","type":"authors"},{"authors":["gayoon"],"categories":null,"content":"","date":1665532800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1665532800,"objectID":"8f41a3d9f711900f4736bfc5257e7613","permalink":"http://localhost:1313/author/gayoon-choi/","publishdate":"2022-10-12T00:00:00Z","relpermalink":"/author/gayoon-choi/","section":"authors","summary":"","tags":null,"title":"Gayoon Choi","type":"authors"},{"authors":["dayun"],"categories":null,"content":"","date":1661990400,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1661990400,"objectID":"17273d0a97fcfb096f33ebf5e551afb9","permalink":"http://localhost:1313/author/yumin-kim/","publishdate":"2022-09-01T00:00:00Z","relpermalink":"/author/yumin-kim/","section":"authors","summary":"","tags":null,"title":"Yumin 
Kim","type":"authors"},{"authors":["taejin"],"categories":null,"content":"","date":1657929600,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1657929600,"objectID":"754d3a47a9b66a96798ad9aecfefaf48","permalink":"http://localhost:1313/author/taejin-jeong/","publishdate":"2022-07-16T00:00:00Z","relpermalink":"/author/taejin-jeong/","section":"authors","summary":"","tags":null,"title":"Taejin Jeong","type":"authors"},{"authors":["donghyun"],"categories":null,"content":"","date":1657843200,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1657843200,"objectID":"b7c02d3f0f41186bce5fa3f8a2d2da73","permalink":"http://localhost:1313/author/donghyun-kim/","publishdate":"2022-07-15T00:00:00Z","relpermalink":"/author/donghyun-kim/","section":"authors","summary":"","tags":null,"title":"Donghyun Kim","type":"authors"},{"authors":["dayun"],"categories":null,"content":"","date":1647561600,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1647561600,"objectID":"7b7cc2c60afbdbe6f96d828e5007fc9b","permalink":"http://localhost:1313/author/kyobin-choo/","publishdate":"2022-03-18T00:00:00Z","relpermalink":"/author/kyobin-choo/","section":"authors","summary":"","tags":null,"title":"Kyobin Choo","type":"authors"},{"authors":["yujin"],"categories":null,"content":"","date":1646092800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1646092800,"objectID":"2974b3af626d9d6043aad3c10be10e48","permalink":"http://localhost:1313/author/yujin-yang/","publishdate":"2022-03-01T00:00:00Z","relpermalink":"/author/yujin-yang/","section":"authors","summary":"","tags":null,"title":"Yujin Yang","type":"authors"},{"authors":["Kim"],"categories":null,"content":"吳恩達 is a professor of artificial intelligence at the Stanford AI Lab. His research interests include distributed robotics, mobile computing and programmable matter. He leads the Robotic Neurobiology group, which develops self-reconfiguring robots, systems of self-organizing robots, and mobile sensor networks.\nLorem ipsum dolor sit amet, consectetur adipiscing elit. Sed neque elit, tristique placerat feugiat ac, facilisis vitae arcu. Proin eget egestas augue. Praesent ut sem nec arcu pellentesque aliquet. Duis dapibus diam vel metus tempus vulputate.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"d83c938719db61d502aadee6531e2d81","permalink":"http://localhost:1313/author/kim/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/author/kim/","section":"authors","summary":"吳恩達 is a professor of artificial intelligence at the Stanford AI Lab. His research interests include distributed robotics, mobile computing and programmable matter. He leads the Robotic Neurobiology group, which develops self-reconfiguring robots, systems of self-organizing robots, and mobile sensor networks.","tags":null,"title":"Kim","type":"authors"},{"authors":null,"categories":null,"content":"Nelson Bighetti is a professor of artificial intelligence at the Stanford AI Lab. His research interests include distributed robotics, mobile computing and programmable matter. He leads the Robotic Neurobiology group, which develops self-reconfiguring robots, systems of self-organizing robots, and mobile sensor networks.\nLorem ipsum dolor sit amet, consectetur adipiscing elit. Sed neque elit, tristique placerat feugiat ac, facilisis vitae arcu. Proin eget egestas augue. Praesent ut sem nec arcu pellentesque aliquet. 
Duis dapibus diam vel metus tempus vulputate.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"2525497d367e79493fd32b198b28f040","permalink":"http://localhost:1313/author/micv/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/author/micv/","section":"authors","summary":"Nelson Bighetti is a professor of artificial intelligence at the Stanford AI Lab. His research interests include distributed robotics, mobile computing and programmable matter. He leads the Robotic Neurobiology group, which develops self-reconfiguring robots, systems of self-organizing robots, and mobile sensor networks.","tags":null,"title":"MICV","type":"authors"},{"authors":[],"categories":null,"content":"Slides can be added in a few ways:\nCreate slides using Wowchemy’s Slides feature and link using slides parameter in the front matter of the talk file Upload an existing slide deck to static/ and link using url_slides parameter in the front matter of the talk file Embed your slides (e.g. Google Slides) or presentation video on this page using shortcodes. Further event details, including page elements such as image galleries, can be added to the body of this page.\n","date":1906549200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1906549200,"objectID":"a8edef490afe42206247b6ac05657af0","permalink":"http://localhost:1313/event/example/","publishdate":"2017-01-01T00:00:00Z","relpermalink":"/event/example/","section":"event","summary":"An example event.","tags":[],"title":"Example Event","type":"event"},{"authors":null,"categories":null,"content":"","date":1714521600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1714521600,"objectID":"fbd6b1d0335bc35868e4bfaefa5e14c4","permalink":"http://localhost:1313/post/2024-05-01-two-papers-early-accepted-for-miccai-2024/","publishdate":"2024-05-01T00:00:00Z","relpermalink":"/post/2024-05-01-two-papers-early-accepted-for-miccai-2024/","section":"post","summary":"","tags":null,"title":"Two papers early-accepted for MICCAI 2024","type":"post"},{"authors":null,"categories":null,"content":"NeuroGPT: 치매 진단보조를 위한 뇌영상 및 전자의무기록 중심 멀티모달 대화형 생성모델 개발\n","date":1712102400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1712102400,"objectID":"865d28670c6b745d3a219baf420bb7e8","permalink":"http://localhost:1313/post/2024-04-03-funding-%EC%9A%B0%EC%88%98%EC%8B%A0%EC%A7%84%EC%97%B0%EA%B5%AC/","publishdate":"2024-04-03T00:00:00Z","relpermalink":"/post/2024-04-03-funding-%EC%9A%B0%EC%88%98%EC%8B%A0%EC%A7%84%EC%97%B0%EA%B5%AC/","section":"post","summary":"NeuroGPT: 치매 진단보조를 위한 뇌영상 및 전자의무기록 중심 멀티모달 대화형 생성모델 개발\n","tags":null,"title":"Funding : 우수신진연구","type":"post"},{"authors":null,"categories":null,"content":"","date":1712016000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1712016000,"objectID":"6da1486ce46e1f979e293e996038d580","permalink":"http://localhost:1313/post/2024-04-02-funding-lg%EC%A0%84%EC%9E%90--lg-display/","publishdate":"2024-04-02T00:00:00Z","relpermalink":"/post/2024-04-02-funding-lg%EC%A0%84%EC%9E%90--lg-display/","section":"post","summary":"","tags":null,"title":"Funding - LG전자 \u0026 LG Display","type":"post"},{"authors":null,"categories":null,"content":"2023 우수업적교수상: 
교육부문\n","date":1711929600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1711929600,"objectID":"fc6c3849d8b099e512b8906bf1acf5ce","permalink":"http://localhost:1313/post/2024-04-01-received-2023-distinguished-faculty-award-teaching/","publishdate":"2024-04-01T00:00:00Z","relpermalink":"/post/2024-04-01-received-2023-distinguished-faculty-award-teaching/","section":"post","summary":"2023 우수업적교수상: 교육부문\n","tags":null,"title":"Received 2023 Distinguished Faculty Award - Teaching","type":"post"},{"authors":null,"categories":null,"content":"인공지능 기반의 안드로겐 탈모 진단 시스템 연구개발\n","date":1709251200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1709251200,"objectID":"cd2deb17edf2ff0e1d60e1005d629c86","permalink":"http://localhost:1313/post/2024-03-01-funding-%EC%84%9C%EC%9A%B8%ED%98%81%EC%8B%A0%EC%B1%8C%EB%A6%B0%EC%A7%80%EB%B3%B8%EC%84%A0/","publishdate":"2024-03-01T00:00:00Z","relpermalink":"/post/2024-03-01-funding-%EC%84%9C%EC%9A%B8%ED%98%81%EC%8B%A0%EC%B1%8C%EB%A6%B0%EC%A7%80%EB%B3%B8%EC%84%A0/","section":"post","summary":"인공지능 기반의 안드로겐 탈모 진단 시스템 연구개발\n","tags":null,"title":"Funding - 서울혁신챌린지(본선)","type":"post"},{"authors":null,"categories":null,"content":"Eigen Aggregation Learning for Object-Centric Unsupervised Semantic Segmentation accepted to CVPR\u0026#39;24 as a Highlight paper. Congrats to co-authors Chanyoung Kim, Woojung Han, and Dayun Ju!\n","date":1706745600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1706745600,"objectID":"61152d5926fb68751f949377bdf4f127","permalink":"http://localhost:1313/post/2024-02-01-cvpr24-our-work-eagle/","publishdate":"2024-02-01T00:00:00Z","relpermalink":"/post/2024-02-01-cvpr24-our-work-eagle/","section":"post","summary":"Eigen Aggregation Learning for Object-Centric Unsupervised Semantic Segmentation accepted to CVPR'24 as a Highlight paper. 
Congrats to co-authors Chanyoung Kim, Woojung Han, and Dayun Ju!\n","tags":null,"title":"EAGLE accepted to CVPR'24 as a Highlight paper Congrats to co-authors Chanyoung Kim, Woojung Han, and Dayun Ju!","type":"post"},{"authors":null,"categories":null,"content":"","date":1704067200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1704067200,"objectID":"ce93c9f0baf674dc6c52b585ad1022dc","permalink":"http://localhost:1313/post/2024-01-01-yeongjun-jun-sujung-hong-junhyeok-kim-jiwoo-park-and-suhyun-kim-join-our-lab/","publishdate":"2024-01-01T00:00:00Z","relpermalink":"/post/2024-01-01-yeongjun-jun-sujung-hong-junhyeok-kim-jiwoo-park-and-suhyun-kim-join-our-lab/","section":"post","summary":"","tags":null,"title":"Yeongjun Jun, Sujung Hong, Junhyeok Kim, Jiwoo Park, and Suhyun Kim join our lab","type":"post"},{"authors":null,"categories":null,"content":"인공지능 기반의 안드로겐 탈모 진단 시스템 연구개발\n","date":1690934400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1690934400,"objectID":"265427f063c860300081d46b8e68ad97","permalink":"http://localhost:1313/post/2023-08-02-funding-%EC%84%9C%EC%9A%B8%ED%98%81%EC%8B%A0%EC%B1%8C%EB%A6%B0%EC%A7%80%EC%98%88%EC%84%A0/","publishdate":"2023-08-02T00:00:00Z","relpermalink":"/post/2023-08-02-funding-%EC%84%9C%EC%9A%B8%ED%98%81%EC%8B%A0%EC%B1%8C%EB%A6%B0%EC%A7%80%EC%98%88%EC%84%A0/","section":"post","summary":"인공지능 기반의 안드로겐 탈모 진단 시스템 연구개발\n","tags":null,"title":"Funding - 서울혁신챌린지(예선)","type":"post"},{"authors":null,"categories":null,"content":"Congrats to Mahbaneh!\n","date":1690848000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1690848000,"objectID":"5c93bc68d6bb8e7c336a6c07fc5f33b7","permalink":"http://localhost:1313/post/2023-08-01-accepted-to-medical-image-analysis/","publishdate":"2023-08-01T00:00:00Z","relpermalink":"/post/2023-08-01-accepted-to-medical-image-analysis/","section":"post","summary":"Congrats to Mahbaneh!\n","tags":null,"title":"Accepted to Medical Image Analysis","type":"post"},{"authors":null,"categories":null,"content":"CT 영상 화질개선을 위한 인공지능 연구실\n","date":1688256000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1688256000,"objectID":"4836d8534f8206c914cbc170dd9f17d3","permalink":"http://localhost:1313/post/2023-07-02-funding-%ED%95%9C%EA%B5%AD%EC%97%B0%EA%B5%AC%EC%9E%AC%EB%8B%A8---%EA%B8%B0%EC%B4%88%EC%97%B0%EA%B5%AC%EC%8B%A4/","publishdate":"2023-07-02T00:00:00Z","relpermalink":"/post/2023-07-02-funding-%ED%95%9C%EA%B5%AD%EC%97%B0%EA%B5%AC%EC%9E%AC%EB%8B%A8---%EA%B8%B0%EC%B4%88%EC%97%B0%EA%B5%AC%EC%8B%A4/","section":"post","summary":"CT 영상 화질개선을 위한 인공지능 연구실\n","tags":null,"title":"Funding - 한국연구재단 / 기초연구실","type":"post"},{"authors":null,"categories":null,"content":"바이오 빅데이터 기반 당뇨병 및 합병증 정밀 의료를 위한 AI 플랫폼 및 모델 개발\n","date":1688169600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1688169600,"objectID":"f8e53c3f8400267a46789d3d4dc3dbec","permalink":"http://localhost:1313/post/2023-07-01-funding-%ED%95%9C%EA%B5%AD%EC%97%B0%EA%B5%AC%EC%9E%AC%EB%8B%A8-%EB%8D%B0%EC%9D%B4%ED%84%B0-%EA%B8%B0%EB%B0%98-%EB%94%94%EC%A7%80%ED%84%B8-%EB%B0%94%EC%9D%B4%EC%98%A4-%EC%84%A0%EB%8F%84%EC%82%AC%EC%97%85/","publishdate":"2023-07-01T00:00:00Z","relpermalink":"/post/2023-07-01-funding-%ED%95%9C%EA%B5%AD%EC%97%B0%EA%B5%AC%EC%9E%AC%EB%8B%A8-%EB%8D%B0%EC%9D%B4%ED%84%B0-%EA%B8%B0%EB%B0%98-%EB%94%94%EC%A7%80%ED%84%B8-%EB%B0%94%EC%9D%B4%EC%98%A4-%EC%84%A0%EB%8F%84%EC%82%AC%EC%97%85/","section":"post","summary":"바이오 빅데이터 기반 당뇨병 및 합병증 정밀 의료를 위한 AI 플랫폼 및 모델 개발\n","tags":null,"title":"Funding - 한국연구재단/데이터 기반 
디지털 바이오 선도사업","type":"post"},{"authors":["Woojung Han*","Chanyoung Kim*","Dayun Ju","Yumin Shim","Seong Jae Hwang"],"categories":null,"content":"","date":1682899200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1682899200,"objectID":"e53fe15e44f0a6bcaa17d1aa78d58172","permalink":"http://localhost:1313/publication/2024-miccai-cxrl/","publishdate":"2023-05-01T00:00:00Z","relpermalink":"/publication/2024-miccai-cxrl/","section":"publication","summary":"MICCAI 2024 (Early Accept, top 11% of submissions)","tags":null,"title":"Advancing Text-Driven Chest X-Ray Generation with Policy-Based Reinforcement Learning","type":"publication"},{"authors":["Kyobin Choo","Youngjun Jun","Mijin Yun","Seong Jae Hwang"],"categories":null,"content":"","date":1682899200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1682899200,"objectID":"6712562cbf912247247a5a477dbd1e80","permalink":"http://localhost:1313/publication/2024-miccai-ct2mri/","publishdate":"2023-05-01T00:00:00Z","relpermalink":"/publication/2024-miccai-ct2mri/","section":"publication","summary":"MICCAI 2024 (Early Accept, top 11% of submissions)","tags":null,"title":"Slice-Consistent 3D Volumetric Brain CT-to-MRI Translation with 2D Brownian Bridge Diffusion Model","type":"publication"},{"authors":["Seil Kang","Donghyun Kim","Junhyeok Kim","Seong Jae Hwang"],"categories":null,"content":"","date":1682899200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1682899200,"objectID":"7f92ffd9dfec1aed7f3945997519b7b4","permalink":"http://localhost:1313/publication/2024-arxiv-wolf/","publishdate":"2023-05-01T00:00:00Z","relpermalink":"/publication/2024-arxiv-wolf/","section":"publication","summary":"arxiv","tags":null,"title":"WoLF: Large Language Model Framework for CXR Understanding","type":"publication"},{"authors":null,"categories":null,"content":"","date":1677628800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1677628800,"objectID":"873b25719908051a819e93a145d6a452","permalink":"http://localhost:1313/post/2023-03-01-dayun-ju-and-chanyoung-kim-join-our-lab/","publishdate":"2023-03-01T00:00:00Z","relpermalink":"/post/2023-03-01-dayun-ju-and-chanyoung-kim-join-our-lab/","section":"post","summary":"","tags":null,"title":"Dayun Ju and Chanyoung Kim join our lab","type":"post"},{"authors":null,"categories":null,"content":"Congrats to Anthony!\n","date":1675209600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1675209600,"objectID":"fac76a5ede08bed8f981d7df493c5043","permalink":"http://localhost:1313/post/2023-02-01-accepted-for-machine-learning-on-theoretical-domain-generalization/","publishdate":"2023-02-01T00:00:00Z","relpermalink":"/post/2023-02-01-accepted-for-machine-learning-on-theoretical-domain-generalization/","section":"post","summary":"Congrats to Anthony!\n","tags":null,"title":"Accepted for Machine Learning on theoretical domain generalization","type":"post"},{"authors":["Chanyoung Kim*","Woojung Han*","Dayun Ju","Seong Jae Hwang"],"categories":null,"content":"","date":1675209600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1675209600,"objectID":"a0739f6fe549803ad66f87463165620a","permalink":"http://localhost:1313/publication/2024-cvpr-eagle/","publishdate":"2023-02-01T00:00:00Z","relpermalink":"/publication/2024-cvpr-eagle/","section":"publication","summary":"CVPR 2024 (Highlight)","tags":null,"title":"EAGLE: Eigen Aggregation Learning for Object-Centric Unsupervised Semantic 
Segmentation","type":"publication"},{"authors":null,"categories":null,"content":"","date":1672617600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1672617600,"objectID":"b3064751b150f3d00cff86f7e38e6b4b","permalink":"http://localhost:1313/post/2023-01-02-gayoon-choi-taejin-jeong-and-jeahoon-joo-join-our-lab/","publishdate":"2023-01-02T00:00:00Z","relpermalink":"/post/2023-01-02-gayoon-choi-taejin-jeong-and-jeahoon-joo-join-our-lab/","section":"post","summary":"","tags":null,"title":"Gayoon Choi, Taejin Jeong, and Jeahoon Joo join our lab","type":"post"},{"authors":null,"categories":null,"content":"","date":1672531200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1672531200,"objectID":"1c1134d4bad091d6478dea8f49e5af6d","permalink":"http://localhost:1313/post/2023-01-01-accepted-for-isbi-2023/","publishdate":"2023-01-01T00:00:00Z","relpermalink":"/post/2023-01-01-accepted-for-isbi-2023/","section":"post","summary":"","tags":null,"title":"Accepted for ISBI 2023","type":"post"},{"authors":null,"categories":null,"content":"","date":1666569600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1666569600,"objectID":"c1d17ff2b20dca0ad6653a3161942b64","permalink":"http://localhost:1313/people/","publishdate":"2022-10-24T00:00:00Z","relpermalink":"/people/","section":"","summary":"","tags":null,"title":"Member","type":"landing"},{"authors":null,"categories":null,"content":"","date":1659312000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1659312000,"objectID":"d69f29f28d3eb66d0cf507360246fa29","permalink":"http://localhost:1313/post/2022-08-01-our-paper-won-the-best-paper-award-at-uai-2022/","publishdate":"2022-08-01T00:00:00Z","relpermalink":"/post/2022-08-01-our-paper-won-the-best-paper-award-at-uai-2022/","section":"post","summary":"","tags":null,"title":"Our paper won the Best Paper Award at UAI 2022","type":"post"},{"authors":null,"categories":null,"content":"","date":1656633600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1656633600,"objectID":"24180519862d7f050100ff08ed0e131c","permalink":"http://localhost:1313/post/2022-07-01-yumin-kim-seil-kang-kyobin-choo-hyunjin-kim-and-donghyun-kim-join-our-lab/","publishdate":"2022-07-01T00:00:00Z","relpermalink":"/post/2022-07-01-yumin-kim-seil-kang-kyobin-choo-hyunjin-kim-and-donghyun-kim-join-our-lab/","section":"post","summary":"","tags":null,"title":"Yumin Kim, Seil Kang, Kyobin Choo, Hyunjin Kim, and Donghyun Kim join our lab","type":"post"},{"authors":null,"categories":null,"content":"Congrats to Anthony!\n","date":1651363200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1651363200,"objectID":"f288b5349e22ee71e4a48e3ddc2ad6ae","permalink":"http://localhost:1313/post/2022-05-01-accepted-for-uai-2022-eindhoven-the-netherlands-for-an-oral-presentation/","publishdate":"2022-05-01T00:00:00Z","relpermalink":"/post/2022-05-01-accepted-for-uai-2022-eindhoven-the-netherlands-for-an-oral-presentation/","section":"post","summary":"Congrats to Anthony!\n","tags":null,"title":"Accepted for UAI 2022, Eindhoven, the Netherlands for an Oral Presentation","type":"post"},{"authors":null,"categories":null,"content":"Congrats to Xingchen and 
Anthony!\n","date":1648771200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1648771200,"objectID":"592e0dff31477386c301c532a34513bd","permalink":"http://localhost:1313/post/2022-04-01-accepted-for-ijcai-2022-vienna/","publishdate":"2022-04-01T00:00:00Z","relpermalink":"/post/2022-04-01-accepted-for-ijcai-2022-vienna/","section":"post","summary":"Congrats to Xingchen and Anthony!\n","tags":null,"title":"Accepted for IJCAI 2022, Vienna","type":"post"},{"authors":null,"categories":null,"content":"","date":1647129600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1647129600,"objectID":"486da6bf0d12cb71f50d6bfd0abd48ac","permalink":"http://localhost:1313/post/2022-03-01-joining-as-an-assistant-professor-in-the-department-of-artificial-intelligence-at-yonsei-university/","publishdate":"2022-03-13T00:00:00Z","relpermalink":"/post/2022-03-01-joining-as-an-assistant-professor-in-the-department-of-artificial-intelligence-at-yonsei-university/","section":"post","summary":"","tags":null,"title":"Joining as an Assistant Professor in the Department of Artificial Intelligence at Yonsei University","type":"post"},{"authors":null,"categories":null,"content":"","date":1646265600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1646265600,"objectID":"2aad396d68013183207138be2b946fbf","permalink":"http://localhost:1313/post/2022-03-03-yujin-yang-and-woojung-han-join-our-lab/","publishdate":"2022-03-03T00:00:00Z","relpermalink":"/post/2022-03-03-yujin-yang-and-woojung-han-join-our-lab/","section":"post","summary":"","tags":null,"title":"Yujin Yang and Woojung Han join our lab","type":"post"},{"authors":null,"categories":null,"content":"Congrats to Anthony!\n","date":1646179200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1646179200,"objectID":"4cfe26d1b89d8c42381cec0afc13fad3","permalink":"http://localhost:1313/post/2022-03-02-accepted-for-findings-of-acl-2022/","publishdate":"2022-03-02T00:00:00Z","relpermalink":"/post/2022-03-02-accepted-for-findings-of-acl-2022/","section":"post","summary":"Congrats to Anthony!\n","tags":null,"title":"Accepted for Findings of ACL 2022","type":"post"},{"authors":null,"categories":null,"content":"Congrats to Mahbaneh!\n","date":1633651200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1633651200,"objectID":"3f241bd46df500ce38daa21785f6c712","permalink":"http://localhost:1313/post/2021-10-08-accepted-for-neuroimage/","publishdate":"2021-10-08T00:00:00Z","relpermalink":"/post/2021-10-08-accepted-for-neuroimage/","section":"post","summary":"Congrats to Mahbaneh!\n","tags":null,"title":"Accepted for NeuroImage","type":"post"},{"authors":null,"categories":null,"content":"Congrats to Mahbaneh!\n","date":1628899200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1628899200,"objectID":"16bc5efd56c7afdd7277f637d02b030e","permalink":"http://localhost:1313/post/2021-08-14-accepted-for-the-first-workshop-on-computer-vision-for-automated-medical-diagnosis-@-iccv-2021/","publishdate":"2021-08-14T00:00:00Z","relpermalink":"/post/2021-08-14-accepted-for-the-first-workshop-on-computer-vision-for-automated-medical-diagnosis-@-iccv-2021/","section":"post","summary":"Congrats to Mahbaneh!\n","tags":null,"title":"Accepted for The First Workshop on Computer Vision for Automated Medical Diagnosis @ ICCV 2021","type":"post"},{"authors":null,"categories":null,"content":"Congrats to 
Anthony!\n","date":1623024000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1623024000,"objectID":"c1432ced9dbc3ff7232c8d764ddb2f60","permalink":"http://localhost:1313/post/2021-06-07-accepted-for-miccai-2021-virtual/","publishdate":"2021-06-07T00:00:00Z","relpermalink":"/post/2021-06-07-accepted-for-miccai-2021-virtual/","section":"post","summary":"Congrats to Anthony!\n","tags":null,"title":"Accepted for MICCAI 2021, Virtual","type":"post"},{"authors":null,"categories":null,"content":"Congrats to Shibo!\n","date":1621123200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1621123200,"objectID":"d8acb15c32f13cc4f0f8af14c71afdd9","permalink":"http://localhost:1313/post/2021-05-16-accepted-for-midl-2021-virtual/","publishdate":"2021-05-16T00:00:00Z","relpermalink":"/post/2021-05-16-accepted-for-midl-2021-virtual/","section":"post","summary":"Congrats to Shibo!\n","tags":null,"title":"Accepted for MIDL 2021, Virtual","type":"post"},{"authors":null,"categories":null,"content":"Congrats to Mahbaneh!\n","date":1615420800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1615420800,"objectID":"b26f9fae26815dfb3169f2893c00549e","permalink":"http://localhost:1313/post/2021-03-11-accepted-for-aaic-2021-denver-usa/","publishdate":"2021-03-11T00:00:00Z","relpermalink":"/post/2021-03-11-accepted-for-aaic-2021-denver-usa/","section":"post","summary":"Congrats to Mahbaneh!\n","tags":null,"title":"Accepted for AAIC 2021, Denver, USA","type":"post"},{"authors":null,"categories":null,"content":"Congrats to Anthony and Xingchen!\n","date":1614384000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1614384000,"objectID":"704362a507c4fea9f73890e236b13825","permalink":"http://localhost:1313/post/2021-02-27-two-full-papers-accepted-for-isbi-2021-virtual/","publishdate":"2021-02-27T00:00:00Z","relpermalink":"/post/2021-02-27-two-full-papers-accepted-for-isbi-2021-virtual/","section":"post","summary":"Congrats to Anthony and Xingchen!\n","tags":null,"title":"Two Full Papers Accepted for ISBI 2021, Virtual","type":"post"},{"authors":null,"categories":null,"content":"","date":1612915200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1612915200,"objectID":"9254ac1edc77a4108a4fc2c6e0b1000a","permalink":"http://localhost:1313/post/2021-02-10-accepted-for-ipmi-2021-virtual/","publishdate":"2021-02-10T00:00:00Z","relpermalink":"/post/2021-02-10-accepted-for-ipmi-2021-virtual/","section":"post","summary":"","tags":null,"title":"Accepted for IPMI 2021, Virtual","type":"post"},{"authors":null,"categories":null,"content":"A bias-resilient deep learning algorithm for robust white matter hyperintensity segmentation on Alzheimer’s disease data with confounding factors\n","date":1608076800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1608076800,"objectID":"45d209567ad96fac5de130c1a736d65d","permalink":"http://localhost:1313/post/2020-12-16-received-alzheimers-disease-research-center-developmental-project-grant-by-pitt-adrc-for-2021-2022/","publishdate":"2020-12-16T00:00:00Z","relpermalink":"/post/2020-12-16-received-alzheimers-disease-research-center-developmental-project-grant-by-pitt-adrc-for-2021-2022/","section":"post","summary":"A bias-resilient deep learning algorithm for robust white matter hyperintensity segmentation on Alzheimer’s disease data with confounding factors\n","tags":null,"title":"Received Alzheimer's Disease Research Center Developmental Project grant by Pitt ADRC for 
2021-2022","type":"post"},{"authors":null,"categories":null,"content":"Congratulations to Jian Yang and Monica Hall for winning the Best Paper Award at the 2020 Conference on Wowchemy for their paper “Learning Wowchemy”.\n","date":1606867200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1606867200,"objectID":"2a0ec8a990dbd78a00c4e15a09364b00","permalink":"http://localhost:1313/post/20-12-02-icml-best-paper/","publishdate":"2020-12-02T00:00:00Z","relpermalink":"/post/20-12-02-icml-best-paper/","section":"post","summary":"Congratulations to Jian Yang and Monica Hall for winning the Best Paper Award at the 2020 Conference on Wowchemy for their paper “Learning Wowchemy”.\n","tags":null,"title":"Bye","type":"post"},{"authors":null,"categories":null,"content":"Congratulations to Richard Hendricks for winning first place in the Wowchemy Prize.\nExplain\n","date":1606780800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1606780800,"objectID":"be2bd15f022f0d83fe9ffd743881e70c","permalink":"http://localhost:1313/post/20-12-01-wowchemy-prize/","publishdate":"2020-12-01T00:00:00Z","relpermalink":"/post/20-12-01-wowchemy-prize/","section":"post","summary":"Congratulations to Richard Hendricks for winning first place in the Wowchemy Prize.\n","tags":null,"title":"Hello","type":"post"},{"authors":null,"categories":null,"content":"","date":1603411200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1603411200,"objectID":"d88e0420aec9859031ea63f15d545600","permalink":"http://localhost:1313/post/2020-10-23-accepted-for-the-anatomical-record/","publishdate":"2020-10-23T00:00:00Z","relpermalink":"/post/2020-10-23-accepted-for-the-anatomical-record/","section":"post","summary":"","tags":null,"title":"Accepted for The Anatomical Record","type":"post"},{"authors":null,"categories":null,"content":"","date":1593648000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1593648000,"objectID":"cd0f4376ad0a354d6e5cfe8a7bd97b67","permalink":"http://localhost:1313/post/2020-07-02-accepted-for-an-oral-presentation-at-the-workshop-on-bioimage-computing-@-eccv-2020/","publishdate":"2020-07-02T00:00:00Z","relpermalink":"/post/2020-07-02-accepted-for-an-oral-presentation-at-the-workshop-on-bioimage-computing-@-eccv-2020/","section":"post","summary":"","tags":null,"title":"Accepted for an Oral Presentation at The Workshop on BioImage Computing @ ECCV 2020","type":"post"},{"authors":null,"categories":null,"content":"Congratulations!\n","date":1587513600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1587513600,"objectID":"e4b691a58382c9d333f96279c4afeaee","permalink":"http://localhost:1313/post/2020-04-22-xingchen-zhao-received-the-sci-undergraduate-research-scholars-award-for-summer-2020/","publishdate":"2020-04-22T00:00:00Z","relpermalink":"/post/2020-04-22-xingchen-zhao-received-the-sci-undergraduate-research-scholars-award-for-summer-2020/","section":"post","summary":"Congratulations!\n","tags":null,"title":"Xingchen Zhao received the SCI Undergraduate Research Scholars award for Summer 2020","type":"post"},{"authors":null,"categories":null,"content":"","date":1578009600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1578009600,"objectID":"51e1a843b2b7cd5c2743d287a11a1237","permalink":"http://localhost:1313/post/2020-01-03-accepted-for-isbi-2020-iowa-city-usa/","publishdate":"2020-01-03T00:00:00Z","relpermalink":"/post/2020-01-03-accepted-for-isbi-2020-iowa-city-usa/","section":"post","summary":"","tags":null,"title":"Accepted for ISBI 
2020, Iowa City, USA","type":"post"},{"authors":null,"categories":null,"content":"","date":1571443200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1571443200,"objectID":"b44b6875b186da4bb05efe3137c52234","permalink":"http://localhost:1313/post/2019-10-19-joining-as-an-assistant-professor-in-the-department-of-computer-science-at-the-university-of-pittsburgh/","publishdate":"2019-10-19T00:00:00Z","relpermalink":"/post/2019-10-19-joining-as-an-assistant-professor-in-the-department-of-computer-science-at-the-university-of-pittsburgh/","section":"post","summary":"","tags":null,"title":"Joining as an Assistant Professor in the Department of Computer Science at the University of Pittsburgh","type":"post"},{"authors":null,"categories":null,"content":" Create your slides in Markdown - click the Slides button to check out the example. Add the publication’s full text or supplementary notes here. You can use rich formatting such as including code, math, and images.\n","date":1554595200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1554595200,"objectID":"c645ab16c36678119f3be6cebedb4a98","permalink":"http://localhost:1313/publication-dummy/preprint/","publishdate":"2017-01-01T00:00:00Z","relpermalink":"/publication-dummy/preprint/","section":"publication-dummy","summary":"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis posuere tellus ac convallis placerat. Proin tincidunt magna sed ex sollicitudin condimentum.","tags":["Source Themes"],"title":"An example preprint / working paper","type":"publication-dummy"},{"authors":["Robert Ford"],"categories":null,"content":" Click the Cite button above to demo the feature to enable visitors to import publication metadata into their reference management software. Create your slides in Markdown - click the Slides button to check out the example. Add the publication’s full text or supplementary notes here. You can use rich formatting such as including code, math, and images.\n","date":1441065600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1441065600,"objectID":"74175087caa584901180f346cddd6ef2","permalink":"http://localhost:1313/publication-dummy/journal-article/","publishdate":"2017-01-01T00:00:00Z","relpermalink":"/publication-dummy/journal-article/","section":"publication-dummy","summary":"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis posuere tellus ac convallis placerat. Proin tincidunt magna sed ex sollicitudin condimentum.","tags":["Source Themes"],"title":"An example journal article","type":"publication-dummy"},{"authors":["Robert Ford"],"categories":null,"content":" Click the Cite button above to demo the feature to enable visitors to import publication metadata into their reference management software. Create your slides in Markdown - click the Slides button to check out the example. Add the publication’s full text or supplementary notes here. 
You can use rich formatting such as including code, math, and images.\n","date":1372636800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1372636800,"objectID":"d490c8bbd639f0a713b2dbb7172bb554","permalink":"http://localhost:1313/publication-dummy/conference-paper/","publishdate":"2017-01-01T00:00:00Z","relpermalink":"/publication-dummy/conference-paper/","section":"publication-dummy","summary":".","tags":[],"title":"An example conference paper","type":"publication-dummy"},{"authors":null,"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"f26b5133c34eec1aa0a09390a36c2ade","permalink":"http://localhost:1313/admin/config.yml","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/admin/config.yml","section":"","summary":"","tags":null,"title":"","type":"decap_cms"},{"authors":null,"categories":null,"content":"\u0026lt;!DOCTYPE html\u0026gt; CoBra CoBra: Complementary Branch Fusing Class and Semantic Knowledge for Robust Weakly Supervised Semantic Segmentation code Code Overview illustration of our model, Complementary Branch (CoBra). The dual branch framework consists of the Class-Aware Knowledge branch with CNN and the Semantic-Aware Knowledge branch with ViT. They give complementary knowledge to each branch. Motivation While Class Activation Maps (CAMs) using CNNs have steadily been contributing to the success of WSSS, the resulting activation maps often narrowly focus on class-specific parts (e.g., only face of human). On the other hand, recent works based on vision transformers (ViT) have shown promising results based on their self-attention mechanism to capture the semantic parts but fail in capturing complete class-specific details (e.g., entire body parts of human but also with a dog nearby). The figure shows the comparison of object localization maps from each CNN, ViT, and Cobra branches for various subjects (human, dog, airplane), illustrating the distinctive areas of interest each model identifies. Our model successfully utilizes complementary characteristics to localize the exact object of the correct class and its semantic parts. Key Contribution We propose a dual branch framework, namely Complementary Branch (CoBra), which aims to fuse the complementary nature of CNN and ViT localization maps. We capture the class and semantic knowledge as Class Aware Projection (CAP) and Semantic-Aware Projection (SAP) respectively for effective complementary guidance to the CNN and ViT branches in CoBra, employing contrastive learning for enhanced guidance. Extensive experiments qualitatively and quantitatively investigate how CNN and ViT complement each other on the PASCAL VOC 2012 dataset and MS COCO 2014 dataset, showing a state-of-the-art WSSS result. Main model Overview illustration of our model. (I) Class Aware Knoweldge(CAK): The CNN outputs a feature map which generates (1) CNN CAMs via $f_{CAM}$, (2) Pseudo-Labels from CNN CAMs via $argmax$, and (3) Class-Aware Projection (CAP) via $f_{proj}$. (II) Semantic Aware Knowledge(SAK): The ViT outputs $N^2$ Patch Embeddings which generate (1) ViT CAMs via $f_{CAM}$ and (2) Semantic-Aware Projection (SAP) via $f_{proj}$. We also use the Attention Maps of all $L$-layers to generate (3) Patch Affinity of size $N^2 \\times N^2$. Method Illustration of refining CAP and SAP from SAK and CAK branch respectively. (I) Class Aware Knoweldge(CAK): The CAP values are embedded in the Class Feature Space. 
(1) The patch affinity from SAK branch assigns the positive (green), negative (red), and neutral (teal) patches based on the target (white) patch. (2) The CNN CAM shows that the false negative patches have been weakly localized as horse. (3) The CAP loss pull those weakly localized patches (i.e., false class negatives) since they are assigned as semantically positive patches based on SAK branch. (3) The CAP is refined to improve the CNN CAM showing fewer false class negatives. (II) Semantic Aware Knowledge(SAK): The SAP values are embedded in the Semantic Feature Space. (1) The CNN CAM from CAK branch assigns the positive (green), negative (red), and neutral (teal) patches based on the target (white) patch. (2) The ViT CAM shows that the negative patches have been incorrectly localized as horse. The SAP loss pushes away those incorrectly localized patches (i.e., false class positives) since they are assigned as negative patches based on CAK branch. (3) The SAP is refined to improve the ViT CAM showing fewer false class positives. Quantitative Experiments Pascal VOC 2012 seed \u0026amp; mask results Evaluation of initial seed and corresponding pseudo segmentation mask on PASCAL VOC 2012 training set in mIoU (%). Pascal VOC 2012 segmentation results Semantic segmentation results on the validation (Val) and Test set of PASCAL VOC 2012 dataset. Sup. (Supervision) : Image (I) and Saliency Map (S). MS-COCO 2014 segmentation results Semgentation mIoU results(%) on MS-COCO 2014 val dataset Qualitative Experiments Seed Results Qualitative results. From left: (1) Input image, (2) Our result, (3) CNN CAM of our model, (4) Ours without SAP Loss, (5) ViT CAM of our model, (6) Ours without CAP Loss, (7) Our Pseudo mask for segmentation and (8) ground-truth segmentation label. We see that our results are able to differentiate between classes while finding their accurate object boundaries. Pascal VOC Segmentation Results Qualitative seg results on the PASCAL VOC val set. MS COCO Segmentation Results Qualitative seg results on the MS COCO val set. This website is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License. This means you are free to borrow the source code of this website, we just ask that you link back to this page in the footer. Please remember to remove the analytics code included in the header of the website which you do not want on your website. ","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"c8c40f72d166810ae9da720b8b232b38","permalink":"http://localhost:1313/cobra2024/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/cobra2024/","section":"","summary":"\u003c!DOCTYPE html\u003e CoBra CoBra: Complementary Branch Fusing Class and Semantic Knowledge for Robust Weakly Supervised Semantic Segmentation code Code Overview illustration of our model, Complementary Branch (CoBra). The dual branch framework consists of the Class-Aware Knowledge branch with CNN and the Semantic-Aware Knowledge branch with ViT.","tags":null,"title":"","type":"page"},{"authors":null,"categories":null,"content":"\u0026lt;!DOCTYPE html\u0026gt; CXRL Advancing Text-Driven Chest X-Ray Generation with Policy-Based Reinforcement Learning Early Accept @ MICCAI 2024 Woojung Han*, Chanyoung Kim*, Dayun Ju, Yumin Shim, Seong Jae Hwang Yonsei University Paper arXiv Code We introduce CXRL, Advancing Text-Driven Chest X-Ray Generation with Policy-Based Reinforcement Learning. 
Abstract Recent advances in text-conditioned image generation diffusion models have begun paving the way for new opportunities in modern medical domain, in particular, generating Chest X-rays (CXRs) from diagnostic reports. Nonetheless, to further drive the diffusion models to generate CXRs that faithfully reflect the complexity and diversity of real data, it has become evident that a nontrivial learning approach is needed. In light of this, we propose CXRL, a framework motivated by the potential of reinforcement learning (RL). Specifically, we integrate a policy gradient RL approach with well-designed multiple distinctive CXR-domain specific reward models. This approach guides the diffusion denoising trajectory, achieving precise CXR posture and pathological details. Here, considering the complex medical image environment, we present “RL with Comparative Feedback” (RLCF) for the reward mechanism, a human-like comparative evaluation that is known to be more effective and reliable in complex scenarios compared to direct evaluation. Our CXRL framework includes jointly optimizing learnable adaptive condition embeddings (ACE) and the image generator, enabling the model to produce more accurate and higher perceptual CXR quality. Our extensive evaluation of the MIMIC-CXR-JPG dataset demonstrates the effectiveness of our RL-based tuning approach. Consequently, our CXRL generates pathologically realistic CXRs, establishing a new standard for generating CXRs with high fidelity to real-world clinical scenarios. Video Method Pipeline The pipeline of CXRL. Our model employs policy gradient optimization utilizing multi-reward feedback, fine-tuning image generator, and ACE to produce realistic and accurate CXR that corresponds closely to the input report. Contribution Our study pioneers in applying RL to text-conditioned medical image synthesis, particularly in CXRs, focusing on detail refinement and input condition control for clinical accuracy. We advance report-to-CXR generation with an RLCF-based rewarding mechanism, emphasizing posture alignment, pathology accuracy, and consistency between input reports and generated CXRs. We jointly optimize the image generator and ACE via reward feedback models, ensuring image-text alignment and medical accuracy across varied reports, setting a new benchmark in a report-to-CXR generation. Reward Feedback Models A detailed illustration of our reward feedback models. We incorporate three different feedbacks for report-to-CXR generation model to generate goal-oriented CXRs. Posture Alignment Feedback: Generated CXRs often face scaling issues, like excessive zooming or rotation, obscuring essential details. To counter these undesirable effects, we introduce a reward signal to align the CXR\u0026#39;s posture with a canonical orientation to preserve essential parts. Diagnostic Condition Feedback: To accurately reflect generated CXRs with referenced pathologies, we classify them using a parsed report label, rewarding its accuracy. Multimodal Consistency Feedback: We enforce the generated CXRs to better match their reports. We leverage a multimodal latent representation pretrained with CXR-report pairs for semantic agreement assessment. Qualitative Results Comparison between previous models and ours Comparison between previous state-of-the-art report-to-CXR generation models [19,3] and ours. The blue and green texts match their corresponding colored arrows. Additional Qualitative results Additional qualitative results of our framework comparing against baselines. 
The colored texts match their corresponding colored arrows. Ours w/o ACE or RLCF demonstrates superior report agreement and posture alignment compared to other baselines. CXRL is observed to generate more advanced high-fidelity CXRs that highlight our methodology\u0026#39;s effectiveness in synthesizing clinically accurate medical images. Qualitative ablation on each reward model (a): CXRL shows significantly better alignment of the clavicle and costophrenic angle compared to the anchor regarding posture alignment. (b): CXRL demonstrates improved predictive diagnostic accuracy, closely matching the GT and enhancing clinical decision-making (c): The multimodal consistency reward ensures that CXRs and reports correspond well, as observed by arrows and text in matching colors. Evaluation of generated CXRs from multiple feedback perspectives Evaluation Metrics The table compares the performance of various methods using three evaluation metrics. CXR Quality Table Comparative analysis of generated CXR quality: (a) quantitatively compares established models using FID and MS-SSIM metrics; (b) evaluates the impact of …","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"b6ade3eb7d0d5642a41d5c4a517e32b6","permalink":"http://localhost:1313/cxrl2024/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/cxrl2024/","section":"","summary":"\u003c!DOCTYPE html\u003e CXRL Advancing Text-Driven Chest X-Ray Generation with Policy-Based Reinforcement Learning Early Accept @ MICCAI 2024 Woojung Han*, Chanyoung Kim*, Dayun Ju, Yumin Shim, Seong Jae Hwang Yonsei University Paper arXiv Code We introduce CXRL, Advancing Text-Driven Chest X-Ray Generation with Policy-Based Reinforcement Learning.","tags":null,"title":"","type":"page"},{"authors":null,"categories":null,"content":"\u0026lt;!DOCTYPE html\u0026gt; EAGLE EAGLE🦅: Eigen Aggregation Learning for Object-Centric Unsupervised Semantic Segmentation Highlight @ CVPR 2024 Chanyoung Kim*, Woojung Han*, Dayun Ju, Seong Jae Hwang Yonsei University Paper arXiv Code We introduce EAGLE, Eigen AGgregation LEarning for object-centric unsupervised semantic segmentation. Abstract Semantic segmentation has innately relied on extensive pixel-level labeled annotated data, leading to the emergence of unsupervised methodologies. Among them, leveraging self-supervised Vision Transformers for unsupervised semantic segmentation (USS) has been making steady progress with expressive deep features. Yet, for semantically segmenting images with complex objects, a predominant challenge remains: the lack of explicit object-level semantic encoding in patch-level features. This technical limitation often leads to inadequate segmentation of complex objects with diverse structures. To address this gap, we present a novel approach, EAGLE, which emphasizes object-centric representation learning for unsupervised semantic segmentation. Specifically, we introduce EiCue, a spectral technique providing semantic and structural cues through an eigenbasis derived from the semantic similarity matrix of deep image features and color affinity from an image. Further, by incorporating our object-centric contrastive loss with EiCue, we guide our model to learn object-level representations with intra- and inter-image object-feature consistency, thereby enhancing semantic accuracy. 
Extensive experiments on COCO-Stuff, Cityscapes, and Potsdam-3 datasets demonstrate the state-of-the-art USS results of EAGLE with accurate and consistent semantic segmentation across complex scenes. Video Method Pipeline The pipeline of EAGLE. Leveraging the Laplacian matrix, which integrates hierarchically projected image key features and color affinity, the model exploits eigenvector clustering to capture object-level perspective cues defined as \\( \\mathrm{\\mathcal{M}}_{eicue} \\) and \\( \\mathrm{\\tilde{\\mathcal{M}}_{eicue}} \\). Distilling knowledge from \\( \\mathrm{\\mathcal{M}}_{eicue} \\), our model further adopts an object-centric contrastive loss, utilizing the projected vector \\( \\mathrm{Z} \\) and \\( \\mathrm{\\tilde{Z}} \\). The learnable prototype \\( \\mathrm{\\Phi} \\) assigned from \\( \\mathrm{Z} \\) and \\( \\mathrm{\\tilde{Z}} \\), acts as a singular anchor that contrasts positive objects and negative objects. Our object-centric contrastive loss is computed in two distinct manners: intra(\\( \\mathrm{\\mathcal{L}}_{obj} \\))- and inter(\\( \\mathrm{\\mathcal{L}}_{sc} \\))-image to ensure semantic consistency. Eigen Aggregation Module An illustration of the EiCue generation. From the input image, both color affinity matrix \\( \\mathrm{A_{color}} \\) and semantic similarity matrix \\( \\mathrm{A_{seg}} \\) are derived, which are combined to form the Laplacian \\( \\mathrm{L_{sym}} \\). An eigenvector subset \\( \\mathrm{\\hat{V}} \\) of \\( \\mathrm{L_{sym}} \\) are clustered to produce EiCue. Visualization of Primary Elements Eigenvectors Visualizing eigenvectors derived from \\( \\mathrm{S} \\) in the Eigen Aggregation Module. These eigenvectors not only distinguish different objects but also identify semantically related areas, highlighting how EiCue captures object semantics and boundaries effectively. EiCue Comparison between K-means and EiCue. The bottom row presents EiCue, highlighting its superior ability to capture subtle structural intricacies and understand deeper semantic relationships, which is not as effectively achieved by K-means. Qualitative Results COCO-Stuff Qualitative results of COCO-Stuff dataset trained with ViT-S/8 backbone. Cityscapes Qualitative results of Cityscapes dataset trained with ViT-B/8 backbone. Potsdam-3 Qualitative results of Potsdam-3 dataset trained with ViT-B/8 backbone. Quantitative Results COCO-Stuff Quantitative results on the COCO-Stuff dataset. Cityscapes Quantitative results on the Cityscapes dataset. Potsdam-3 Quantitative results on the Potsdam-3 dataset. BibTeX @InProceedings{2024eagle, author = {Kim, Chanyoung and Han, Woojung and Ju, Dayun and Hwang, Seong Jae}, title = {EAGLE: Eigen Aggregation Learning for Object-Centric Unsupervised Semantic Segmentation}, booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, month = {June}, year = {2024} } This website is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License. This means you are free to borrow the source code of this website, we just ask that you link back to this page in the footer. Please remember to remove the analytics code included in the header of the website which you do not want on your website. 
","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"eeeb8f60f540691edb50ec50a184a95a","permalink":"http://localhost:1313/eagle2024/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/eagle2024/","section":"","summary":"\u003c!DOCTYPE html\u003e EAGLE EAGLE🦅: Eigen Aggregation Learning for Object-Centric Unsupervised Semantic Segmentation Highlight @ CVPR 2024 Chanyoung Kim*, Woojung Han*, Dayun Ju, Seong Jae Hwang Yonsei University Paper arXiv Code We introduce EAGLE, Eigen AGgregation LEarning for object-centric unsupervised semantic segmentation.","tags":null,"title":"","type":"page"}]
\ No newline at end of file
+ [{"authors":["jae wan"],"categories":null,"content":"","date":1717113600,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1717113600,"objectID":"6ef01836a0fda1169e3f8c3cbb6adb51","permalink":"http://localhost:1313/author/jae-wan-park/","publishdate":"2024-05-31T00:00:00Z","relpermalink":"/author/jae-wan-park/","section":"authors","summary":"","tags":null,"title":"Jae Wan Park","type":"authors"},{"authors":["jinyeong"],"categories":null,"content":"","date":1716940800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1716940800,"objectID":"4bd6e37cc21ca9840dece27b03aab4eb","permalink":"http://localhost:1313/author/jinyeong-kim/","publishdate":"2024-05-29T00:00:00Z","relpermalink":"/author/jinyeong-kim/","section":"authors","summary":"","tags":null,"title":"Jinyeong Kim","type":"authors"},{"authors":["dayun"],"categories":null,"content":"","date":1714521600,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1714521600,"objectID":"a1c3fca72509f7369afe1ae0d0d66b3c","permalink":"http://localhost:1313/author/hyunkyung-kwon/","publishdate":"2024-05-01T00:00:00Z","relpermalink":"/author/hyunkyung-kwon/","section":"authors","summary":"","tags":null,"title":"Hyunkyung Kwon","type":"authors"},{"authors":["jiwoo"],"categories":null,"content":"","date":1709251200,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1709251200,"objectID":"36ff167c0dda1f64f8379f3c4ab0b0a9","permalink":"http://localhost:1313/author/jiwoo-park/","publishdate":"2024-03-01T00:00:00Z","relpermalink":"/author/jiwoo-park/","section":"authors","summary":"","tags":null,"title":"Jiwoo Park","type":"authors"},{"authors":["suhyun"],"categories":null,"content":"","date":1705881600,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1705881600,"objectID":"1cb8c53dd729e6257464d64e5e30b8c2","permalink":"http://localhost:1313/author/suhyun-kim/","publishdate":"2024-01-22T00:00:00Z","relpermalink":"/author/suhyun-kim/","section":"authors","summary":"","tags":null,"title":"Suhyun Kim","type":"authors"},{"authors":["jinyeong"],"categories":null,"content":"","date":1704067200,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1704067200,"objectID":"dbad4d12ff85d584209761f88fcb9051","permalink":"http://localhost:1313/author/chanyong-yoon/","publishdate":"2024-01-01T00:00:00Z","relpermalink":"/author/chanyong-yoon/","section":"authors","summary":"","tags":null,"title":"Chanyong Yoon","type":"authors"},{"authors":["dayun"],"categories":null,"content":"","date":1704067200,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1704067200,"objectID":"2488f997c890bd559820a9767ca734e9","permalink":"http://localhost:1313/author/tae-eun-choi/","publishdate":"2024-01-01T00:00:00Z","relpermalink":"/author/tae-eun-choi/","section":"authors","summary":"","tags":null,"title":"Tae Eun Choi","type":"authors"},{"authors":["junhyeok"],"categories":null,"content":"… is a professor of artificial intelligence at the Stanford AI Lab. His research interests include distributed robotics, mobile computing and programmable matter. He leads the Robotic Neurobiology group, which develops self-reconfiguring robots, systems of self-organizing robots, and mobile sensor networks.\nLorem ipsum dolor sit amet, consectetur adipiscing elit. Sed neque elit, tristique placerat feugiat ac, facilisis vitae arcu. Proin eget egestas augue. Praesent ut sem nec arcu pellentesque aliquet. 
Duis dapibus diam vel metus tempus vulputate.\n","date":1698796800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1698796800,"objectID":"0a8c118bd2666ffea4cc0e48ab8c6969","permalink":"http://localhost:1313/author/junhyeok-kim/","publishdate":"2023-11-01T00:00:00Z","relpermalink":"/author/junhyeok-kim/","section":"authors","summary":"… is a professor of artificial intelligence at the Stanford AI Lab. His research interests include distributed robotics, mobile computing and programmable matter. He leads the Robotic Neurobiology group, which develops self-reconfiguring robots, systems of self-organizing robots, and mobile sensor networks.","tags":null,"title":"Junhyeok Kim","type":"authors"},{"authors":["dayun"],"categories":null,"content":"","date":1693785600,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1693785600,"objectID":"09510750f6aa7cb670194d983a945aff","permalink":"http://localhost:1313/author/sujung-hong/","publishdate":"2023-09-04T00:00:00Z","relpermalink":"/author/sujung-hong/","section":"authors","summary":"","tags":null,"title":"Sujung Hong","type":"authors"},{"authors":["dayun"],"categories":null,"content":"","date":1693785600,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1693785600,"objectID":"d6770fe491922eab5700c553acfa2414","permalink":"http://localhost:1313/author/youngjun-jun/","publishdate":"2023-09-04T00:00:00Z","relpermalink":"/author/youngjun-jun/","section":"authors","summary":"","tags":null,"title":"Youngjun Jun","type":"authors"},{"authors":["dayun"],"categories":null,"content":"","date":1678665600,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1678665600,"objectID":"4a50e6146f083cb942b43d3f3d0dda8a","permalink":"http://localhost:1313/author/chanyoung-kim/","publishdate":"2023-03-13T00:00:00Z","relpermalink":"/author/chanyoung-kim/","section":"authors","summary":"","tags":null,"title":"Chanyoung Kim","type":"authors"},{"authors":["dayun"],"categories":null,"content":"","date":1678665600,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1678665600,"objectID":"22aa61c95c3e9abdb51802ad21a0fe73","permalink":"http://localhost:1313/author/dayun-ju/","publishdate":"2023-03-13T00:00:00Z","relpermalink":"/author/dayun-ju/","section":"authors","summary":"","tags":null,"title":"Dayun Ju","type":"authors"},{"authors":["jaehoon"],"categories":null,"content":"","date":1669852800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1669852800,"objectID":"665c1fe62ac64673fa6cf51b7804422b","permalink":"http://localhost:1313/author/jaehoon-joo/","publishdate":"2022-12-01T00:00:00Z","relpermalink":"/author/jaehoon-joo/","section":"authors","summary":"","tags":null,"title":"Jaehoon Joo","type":"authors"},{"authors":["gayoon"],"categories":null,"content":"","date":1665532800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1665532800,"objectID":"8f41a3d9f711900f4736bfc5257e7613","permalink":"http://localhost:1313/author/gayoon-choi/","publishdate":"2022-10-12T00:00:00Z","relpermalink":"/author/gayoon-choi/","section":"authors","summary":"","tags":null,"title":"Gayoon Choi","type":"authors"},{"authors":["dayun"],"categories":null,"content":"","date":1661990400,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1661990400,"objectID":"17273d0a97fcfb096f33ebf5e551afb9","permalink":"http://localhost:1313/author/yumin-kim/","publishdate":"2022-09-01T00:00:00Z","relpermalink":"/author/yumin-kim/","section":"authors","summary":"","tags":null,"title":"Yumin 
Kim","type":"authors"},{"authors":["taejin"],"categories":null,"content":"","date":1657929600,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1657929600,"objectID":"754d3a47a9b66a96798ad9aecfefaf48","permalink":"http://localhost:1313/author/taejin-jeong/","publishdate":"2022-07-16T00:00:00Z","relpermalink":"/author/taejin-jeong/","section":"authors","summary":"","tags":null,"title":"Taejin Jeong","type":"authors"},{"authors":["donghyun"],"categories":null,"content":"","date":1657843200,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1657843200,"objectID":"b7c02d3f0f41186bce5fa3f8a2d2da73","permalink":"http://localhost:1313/author/donghyun-kim/","publishdate":"2022-07-15T00:00:00Z","relpermalink":"/author/donghyun-kim/","section":"authors","summary":"","tags":null,"title":"Donghyun Kim","type":"authors"},{"authors":["dayun"],"categories":null,"content":"","date":1647561600,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1647561600,"objectID":"7b7cc2c60afbdbe6f96d828e5007fc9b","permalink":"http://localhost:1313/author/kyobin-choo/","publishdate":"2022-03-18T00:00:00Z","relpermalink":"/author/kyobin-choo/","section":"authors","summary":"","tags":null,"title":"Kyobin Choo","type":"authors"},{"authors":["yujin"],"categories":null,"content":"","date":1646092800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":1646092800,"objectID":"2974b3af626d9d6043aad3c10be10e48","permalink":"http://localhost:1313/author/yujin-yang/","publishdate":"2022-03-01T00:00:00Z","relpermalink":"/author/yujin-yang/","section":"authors","summary":"","tags":null,"title":"Yujin Yang","type":"authors"},{"authors":["Kim"],"categories":null,"content":"吳恩達 is a professor of artificial intelligence at the Stanford AI Lab. His research interests include distributed robotics, mobile computing and programmable matter. He leads the Robotic Neurobiology group, which develops self-reconfiguring robots, systems of self-organizing robots, and mobile sensor networks.\nLorem ipsum dolor sit amet, consectetur adipiscing elit. Sed neque elit, tristique placerat feugiat ac, facilisis vitae arcu. Proin eget egestas augue. Praesent ut sem nec arcu pellentesque aliquet. Duis dapibus diam vel metus tempus vulputate.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"d83c938719db61d502aadee6531e2d81","permalink":"http://localhost:1313/author/kim/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/author/kim/","section":"authors","summary":"吳恩達 is a professor of artificial intelligence at the Stanford AI Lab. His research interests include distributed robotics, mobile computing and programmable matter. He leads the Robotic Neurobiology group, which develops self-reconfiguring robots, systems of self-organizing robots, and mobile sensor networks.","tags":null,"title":"Kim","type":"authors"},{"authors":null,"categories":null,"content":"Nelson Bighetti is a professor of artificial intelligence at the Stanford AI Lab. His research interests include distributed robotics, mobile computing and programmable matter. He leads the Robotic Neurobiology group, which develops self-reconfiguring robots, systems of self-organizing robots, and mobile sensor networks.\nLorem ipsum dolor sit amet, consectetur adipiscing elit. Sed neque elit, tristique placerat feugiat ac, facilisis vitae arcu. Proin eget egestas augue. Praesent ut sem nec arcu pellentesque aliquet. 
Duis dapibus diam vel metus tempus vulputate.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"2525497d367e79493fd32b198b28f040","permalink":"http://localhost:1313/author/micv/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/author/micv/","section":"authors","summary":"Nelson Bighetti is a professor of artificial intelligence at the Stanford AI Lab. His research interests include distributed robotics, mobile computing and programmable matter. He leads the Robotic Neurobiology group, which develops self-reconfiguring robots, systems of self-organizing robots, and mobile sensor networks.","tags":null,"title":"MICV","type":"authors"},{"authors":[],"categories":null,"content":"Slides can be added in a few ways:\nCreate slides using Wowchemy’s Slides feature and link using slides parameter in the front matter of the talk file Upload an existing slide deck to static/ and link using url_slides parameter in the front matter of the talk file Embed your slides (e.g. Google Slides) or presentation video on this page using shortcodes. Further event details, including page elements such as image galleries, can be added to the body of this page.\n","date":1906549200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1906549200,"objectID":"a8edef490afe42206247b6ac05657af0","permalink":"http://localhost:1313/event/example/","publishdate":"2017-01-01T00:00:00Z","relpermalink":"/event/example/","section":"event","summary":"An example event.","tags":[],"title":"Example Event","type":"event"},{"authors":null,"categories":null,"content":"","date":1714521600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1714521600,"objectID":"fbd6b1d0335bc35868e4bfaefa5e14c4","permalink":"http://localhost:1313/post/2024-05-01-two-papers-early-accepted-for-miccai-2024/","publishdate":"2024-05-01T00:00:00Z","relpermalink":"/post/2024-05-01-two-papers-early-accepted-for-miccai-2024/","section":"post","summary":"","tags":null,"title":"Two papers early-accepted for MICCAI 2024","type":"post"},{"authors":null,"categories":null,"content":"NeuroGPT: 치매 진단보조를 위한 뇌영상 및 전자의무기록 중심 멀티모달 대화형 생성모델 개발\n","date":1712102400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1712102400,"objectID":"865d28670c6b745d3a219baf420bb7e8","permalink":"http://localhost:1313/post/2024-04-03-funding-%EC%9A%B0%EC%88%98%EC%8B%A0%EC%A7%84%EC%97%B0%EA%B5%AC/","publishdate":"2024-04-03T00:00:00Z","relpermalink":"/post/2024-04-03-funding-%EC%9A%B0%EC%88%98%EC%8B%A0%EC%A7%84%EC%97%B0%EA%B5%AC/","section":"post","summary":"NeuroGPT: 치매 진단보조를 위한 뇌영상 및 전자의무기록 중심 멀티모달 대화형 생성모델 개발\n","tags":null,"title":"Funding : 우수신진연구","type":"post"},{"authors":null,"categories":null,"content":"","date":1712016000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1712016000,"objectID":"6da1486ce46e1f979e293e996038d580","permalink":"http://localhost:1313/post/2024-04-02-funding-lg%EC%A0%84%EC%9E%90--lg-display/","publishdate":"2024-04-02T00:00:00Z","relpermalink":"/post/2024-04-02-funding-lg%EC%A0%84%EC%9E%90--lg-display/","section":"post","summary":"","tags":null,"title":"Funding - LG전자 \u0026 LG Display","type":"post"},{"authors":null,"categories":null,"content":"2023 우수업적교수상: 
교육부문\n","date":1711929600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1711929600,"objectID":"fc6c3849d8b099e512b8906bf1acf5ce","permalink":"http://localhost:1313/post/2024-04-01-received-2023-distinguished-faculty-award-teaching/","publishdate":"2024-04-01T00:00:00Z","relpermalink":"/post/2024-04-01-received-2023-distinguished-faculty-award-teaching/","section":"post","summary":"2023 우수업적교수상: 교육부문\n","tags":null,"title":"Received 2023 Distinguished Faculty Award - Teaching","type":"post"},{"authors":null,"categories":null,"content":"인공지능 기반의 안드로겐 탈모 진단 시스템 연구개발\n","date":1709251200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1709251200,"objectID":"cd2deb17edf2ff0e1d60e1005d629c86","permalink":"http://localhost:1313/post/2024-03-01-funding-%EC%84%9C%EC%9A%B8%ED%98%81%EC%8B%A0%EC%B1%8C%EB%A6%B0%EC%A7%80%EB%B3%B8%EC%84%A0/","publishdate":"2024-03-01T00:00:00Z","relpermalink":"/post/2024-03-01-funding-%EC%84%9C%EC%9A%B8%ED%98%81%EC%8B%A0%EC%B1%8C%EB%A6%B0%EC%A7%80%EB%B3%B8%EC%84%A0/","section":"post","summary":"인공지능 기반의 안드로겐 탈모 진단 시스템 연구개발\n","tags":null,"title":"Funding - 서울혁신챌린지(본선)","type":"post"},{"authors":null,"categories":null,"content":"Eigen Aggregation Learning for Object-Centric Unsupervised Semantic Segmentation accepted to CVPR\u0026#39;24 as a Highlight paper. Congrats to co-authors Chanyoung Kim, Woojung Han, and Dayun Ju!\n","date":1706745600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1706745600,"objectID":"61152d5926fb68751f949377bdf4f127","permalink":"http://localhost:1313/post/2024-02-01-cvpr24-our-work-eagle/","publishdate":"2024-02-01T00:00:00Z","relpermalink":"/post/2024-02-01-cvpr24-our-work-eagle/","section":"post","summary":"Eigen Aggregation Learning for Object-Centric Unsupervised Semantic Segmentation accepted to CVPR'24 as a Highlight paper. 
Congrats to co-authors Chanyoung Kim, Woojung Han, and Dayun Ju!\n","tags":null,"title":"EAGLE accepted to CVPR'24 as a Highlight paper Congrats to co-authors Chanyoung Kim, Woojung Han, and Dayun Ju!","type":"post"},{"authors":null,"categories":null,"content":"","date":1704067200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1704067200,"objectID":"ce93c9f0baf674dc6c52b585ad1022dc","permalink":"http://localhost:1313/post/2024-01-01-yeongjun-jun-sujung-hong-junhyeok-kim-jiwoo-park-and-suhyun-kim-join-our-lab/","publishdate":"2024-01-01T00:00:00Z","relpermalink":"/post/2024-01-01-yeongjun-jun-sujung-hong-junhyeok-kim-jiwoo-park-and-suhyun-kim-join-our-lab/","section":"post","summary":"","tags":null,"title":"Yeongjun Jun, Sujung Hong, Junhyeok Kim, Jiwoo Park, and Suhyun Kim join our lab","type":"post"},{"authors":null,"categories":null,"content":"인공지능 기반의 안드로겐 탈모 진단 시스템 연구개발\n","date":1690934400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1690934400,"objectID":"265427f063c860300081d46b8e68ad97","permalink":"http://localhost:1313/post/2023-08-02-funding-%EC%84%9C%EC%9A%B8%ED%98%81%EC%8B%A0%EC%B1%8C%EB%A6%B0%EC%A7%80%EC%98%88%EC%84%A0/","publishdate":"2023-08-02T00:00:00Z","relpermalink":"/post/2023-08-02-funding-%EC%84%9C%EC%9A%B8%ED%98%81%EC%8B%A0%EC%B1%8C%EB%A6%B0%EC%A7%80%EC%98%88%EC%84%A0/","section":"post","summary":"인공지능 기반의 안드로겐 탈모 진단 시스템 연구개발\n","tags":null,"title":"Funding - 서울혁신챌린지(예선)","type":"post"},{"authors":null,"categories":null,"content":"Congrats to Mahbaneh!\n","date":1690848000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1690848000,"objectID":"5c93bc68d6bb8e7c336a6c07fc5f33b7","permalink":"http://localhost:1313/post/2023-08-01-accepted-to-medical-image-analysis/","publishdate":"2023-08-01T00:00:00Z","relpermalink":"/post/2023-08-01-accepted-to-medical-image-analysis/","section":"post","summary":"Congrats to Mahbaneh!\n","tags":null,"title":"Accepted to Medical Image Analysis","type":"post"},{"authors":null,"categories":null,"content":"CT 영상 화질개선을 위한 인공지능 연구실\n","date":1688256000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1688256000,"objectID":"4836d8534f8206c914cbc170dd9f17d3","permalink":"http://localhost:1313/post/2023-07-02-funding-%ED%95%9C%EA%B5%AD%EC%97%B0%EA%B5%AC%EC%9E%AC%EB%8B%A8---%EA%B8%B0%EC%B4%88%EC%97%B0%EA%B5%AC%EC%8B%A4/","publishdate":"2023-07-02T00:00:00Z","relpermalink":"/post/2023-07-02-funding-%ED%95%9C%EA%B5%AD%EC%97%B0%EA%B5%AC%EC%9E%AC%EB%8B%A8---%EA%B8%B0%EC%B4%88%EC%97%B0%EA%B5%AC%EC%8B%A4/","section":"post","summary":"CT 영상 화질개선을 위한 인공지능 연구실\n","tags":null,"title":"Funding - 한국연구재단 / 기초연구실","type":"post"},{"authors":null,"categories":null,"content":"바이오 빅데이터 기반 당뇨병 및 합병증 정밀 의료를 위한 AI 플랫폼 및 모델 개발\n","date":1688169600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1688169600,"objectID":"f8e53c3f8400267a46789d3d4dc3dbec","permalink":"http://localhost:1313/post/2023-07-01-funding-%ED%95%9C%EA%B5%AD%EC%97%B0%EA%B5%AC%EC%9E%AC%EB%8B%A8-%EB%8D%B0%EC%9D%B4%ED%84%B0-%EA%B8%B0%EB%B0%98-%EB%94%94%EC%A7%80%ED%84%B8-%EB%B0%94%EC%9D%B4%EC%98%A4-%EC%84%A0%EB%8F%84%EC%82%AC%EC%97%85/","publishdate":"2023-07-01T00:00:00Z","relpermalink":"/post/2023-07-01-funding-%ED%95%9C%EA%B5%AD%EC%97%B0%EA%B5%AC%EC%9E%AC%EB%8B%A8-%EB%8D%B0%EC%9D%B4%ED%84%B0-%EA%B8%B0%EB%B0%98-%EB%94%94%EC%A7%80%ED%84%B8-%EB%B0%94%EC%9D%B4%EC%98%A4-%EC%84%A0%EB%8F%84%EC%82%AC%EC%97%85/","section":"post","summary":"바이오 빅데이터 기반 당뇨병 및 합병증 정밀 의료를 위한 AI 플랫폼 및 모델 개발\n","tags":null,"title":"Funding - 한국연구재단/데이터 기반 
디지털 바이오 선도사업","type":"post"},{"authors":["Woojung Han*","Chanyoung Kim*","Dayun Ju","Yumin Shim","Seong Jae Hwang"],"categories":null,"content":"","date":1682899200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1682899200,"objectID":"e53fe15e44f0a6bcaa17d1aa78d58172","permalink":"http://localhost:1313/publication/2024-miccai-cxrl/","publishdate":"2023-05-01T00:00:00Z","relpermalink":"/publication/2024-miccai-cxrl/","section":"publication","summary":"MICCAI 2024 (Early Accept, top 11% of submissions)","tags":null,"title":"Advancing Text-Driven Chest X-Ray Generation with Policy-Based Reinforcement Learning","type":"publication"},{"authors":["Kyobin Choo","Youngjun Jun","Mijin Yun","Seong Jae Hwang"],"categories":null,"content":"","date":1682899200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1682899200,"objectID":"6712562cbf912247247a5a477dbd1e80","permalink":"http://localhost:1313/ct2mri2024/","publishdate":"2023-05-01T00:00:00Z","relpermalink":"/ct2mri2024/","section":"publication","summary":"MICCAI 2024 (Early Accept, top 11% of submissions)","tags":null,"title":"Slice-Consistent 3D Volumetric Brain CT-to-MRI Translation with 2D Brownian Bridge Diffusion Model","type":"publication"},{"authors":["Seil Kang","Donghyun Kim","Junhyeok Kim","Seong Jae Hwang"],"categories":null,"content":"","date":1682899200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1682899200,"objectID":"7f92ffd9dfec1aed7f3945997519b7b4","permalink":"http://localhost:1313/publication/2024-arxiv-wolf/","publishdate":"2023-05-01T00:00:00Z","relpermalink":"/publication/2024-arxiv-wolf/","section":"publication","summary":"arxiv","tags":null,"title":"WoLF: Large Language Model Framework for CXR Understanding","type":"publication"},{"authors":null,"categories":null,"content":"","date":1677628800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1677628800,"objectID":"873b25719908051a819e93a145d6a452","permalink":"http://localhost:1313/post/2023-03-01-dayun-ju-and-chanyoung-kim-join-our-lab/","publishdate":"2023-03-01T00:00:00Z","relpermalink":"/post/2023-03-01-dayun-ju-and-chanyoung-kim-join-our-lab/","section":"post","summary":"","tags":null,"title":"Dayun Ju and Chanyoung Kim join our lab","type":"post"},{"authors":null,"categories":null,"content":"Congrats to Anthony!\n","date":1675209600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1675209600,"objectID":"fac76a5ede08bed8f981d7df493c5043","permalink":"http://localhost:1313/post/2023-02-01-accepted-for-machine-learning-on-theoretical-domain-generalization/","publishdate":"2023-02-01T00:00:00Z","relpermalink":"/post/2023-02-01-accepted-for-machine-learning-on-theoretical-domain-generalization/","section":"post","summary":"Congrats to Anthony!\n","tags":null,"title":"Accepted for Machine Learning on theoretical domain generalization","type":"post"},{"authors":["Chanyoung Kim*","Woojung Han*","Dayun Ju","Seong Jae Hwang"],"categories":null,"content":"","date":1675209600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1675209600,"objectID":"a0739f6fe549803ad66f87463165620a","permalink":"http://localhost:1313/publication/2024-cvpr-eagle/","publishdate":"2023-02-01T00:00:00Z","relpermalink":"/publication/2024-cvpr-eagle/","section":"publication","summary":"CVPR 2024 (Highlight)","tags":null,"title":"EAGLE: Eigen Aggregation Learning for Object-Centric Unsupervised Semantic 
Segmentation","type":"publication"},{"authors":null,"categories":null,"content":"","date":1672617600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1672617600,"objectID":"b3064751b150f3d00cff86f7e38e6b4b","permalink":"http://localhost:1313/post/2023-01-02-gayoon-choi-taejin-jeong-and-jeahoon-joo-join-our-lab/","publishdate":"2023-01-02T00:00:00Z","relpermalink":"/post/2023-01-02-gayoon-choi-taejin-jeong-and-jeahoon-joo-join-our-lab/","section":"post","summary":"","tags":null,"title":"Gayoon Choi, Taejin Jeong, and Jeahoon Joo join our lab","type":"post"},{"authors":null,"categories":null,"content":"","date":1672531200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1672531200,"objectID":"1c1134d4bad091d6478dea8f49e5af6d","permalink":"http://localhost:1313/post/2023-01-01-accepted-for-isbi-2023/","publishdate":"2023-01-01T00:00:00Z","relpermalink":"/post/2023-01-01-accepted-for-isbi-2023/","section":"post","summary":"","tags":null,"title":"Accepted for ISBI 2023","type":"post"},{"authors":null,"categories":null,"content":"","date":1666569600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1666569600,"objectID":"c1d17ff2b20dca0ad6653a3161942b64","permalink":"http://localhost:1313/people/","publishdate":"2022-10-24T00:00:00Z","relpermalink":"/people/","section":"","summary":"","tags":null,"title":"Member","type":"landing"},{"authors":null,"categories":null,"content":"","date":1659312000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1659312000,"objectID":"d69f29f28d3eb66d0cf507360246fa29","permalink":"http://localhost:1313/post/2022-08-01-our-paper-won-the-best-paper-award-at-uai-2022/","publishdate":"2022-08-01T00:00:00Z","relpermalink":"/post/2022-08-01-our-paper-won-the-best-paper-award-at-uai-2022/","section":"post","summary":"","tags":null,"title":"Our paper won the Best Paper Award at UAI 2022","type":"post"},{"authors":null,"categories":null,"content":"","date":1656633600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1656633600,"objectID":"24180519862d7f050100ff08ed0e131c","permalink":"http://localhost:1313/post/2022-07-01-yumin-kim-seil-kang-kyobin-choo-hyunjin-kim-and-donghyun-kim-join-our-lab/","publishdate":"2022-07-01T00:00:00Z","relpermalink":"/post/2022-07-01-yumin-kim-seil-kang-kyobin-choo-hyunjin-kim-and-donghyun-kim-join-our-lab/","section":"post","summary":"","tags":null,"title":"Yumin Kim, Seil Kang, Kyobin Choo, Hyunjin Kim, and Donghyun Kim join our lab","type":"post"},{"authors":null,"categories":null,"content":"Congrats to Anthony!\n","date":1651363200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1651363200,"objectID":"f288b5349e22ee71e4a48e3ddc2ad6ae","permalink":"http://localhost:1313/post/2022-05-01-accepted-for-uai-2022-eindhoven-the-netherlands-for-an-oral-presentation/","publishdate":"2022-05-01T00:00:00Z","relpermalink":"/post/2022-05-01-accepted-for-uai-2022-eindhoven-the-netherlands-for-an-oral-presentation/","section":"post","summary":"Congrats to Anthony!\n","tags":null,"title":"Accepted for UAI 2022, Eindhoven, the Netherlands for an Oral Presentation","type":"post"},{"authors":null,"categories":null,"content":"Congrats to Xingchen and 
Anthony!\n","date":1648771200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1648771200,"objectID":"592e0dff31477386c301c532a34513bd","permalink":"http://localhost:1313/post/2022-04-01-accepted-for-ijcai-2022-vienna/","publishdate":"2022-04-01T00:00:00Z","relpermalink":"/post/2022-04-01-accepted-for-ijcai-2022-vienna/","section":"post","summary":"Congrats to Xingchen and Anthony!\n","tags":null,"title":"Accepted for IJCAI 2022, Vienna","type":"post"},{"authors":null,"categories":null,"content":"","date":1647129600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1647129600,"objectID":"486da6bf0d12cb71f50d6bfd0abd48ac","permalink":"http://localhost:1313/post/2022-03-01-joining-as-an-assistant-professor-in-the-department-of-artificial-intelligence-at-yonsei-university/","publishdate":"2022-03-13T00:00:00Z","relpermalink":"/post/2022-03-01-joining-as-an-assistant-professor-in-the-department-of-artificial-intelligence-at-yonsei-university/","section":"post","summary":"","tags":null,"title":"Joining as an Assistant Professor in the Department of Artificial Intelligence at Yonsei University","type":"post"},{"authors":null,"categories":null,"content":"","date":1646265600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1646265600,"objectID":"2aad396d68013183207138be2b946fbf","permalink":"http://localhost:1313/post/2022-03-03-yujin-yang-and-woojung-han-join-our-lab/","publishdate":"2022-03-03T00:00:00Z","relpermalink":"/post/2022-03-03-yujin-yang-and-woojung-han-join-our-lab/","section":"post","summary":"","tags":null,"title":"Yujin Yang and Woojung Han join our lab","type":"post"},{"authors":null,"categories":null,"content":"Congrats to Anthony!\n","date":1646179200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1646179200,"objectID":"4cfe26d1b89d8c42381cec0afc13fad3","permalink":"http://localhost:1313/post/2022-03-02-accepted-for-findings-of-acl-2022/","publishdate":"2022-03-02T00:00:00Z","relpermalink":"/post/2022-03-02-accepted-for-findings-of-acl-2022/","section":"post","summary":"Congrats to Anthony!\n","tags":null,"title":"Accepted for Findings of ACL 2022","type":"post"},{"authors":null,"categories":null,"content":"Congrats to Mahbaneh!\n","date":1633651200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1633651200,"objectID":"3f241bd46df500ce38daa21785f6c712","permalink":"http://localhost:1313/post/2021-10-08-accepted-for-neuroimage/","publishdate":"2021-10-08T00:00:00Z","relpermalink":"/post/2021-10-08-accepted-for-neuroimage/","section":"post","summary":"Congrats to Mahbaneh!\n","tags":null,"title":"Accepted for NeuroImage","type":"post"},{"authors":null,"categories":null,"content":"Congrats to Mahbaneh!\n","date":1628899200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1628899200,"objectID":"16bc5efd56c7afdd7277f637d02b030e","permalink":"http://localhost:1313/post/2021-08-14-accepted-for-the-first-workshop-on-computer-vision-for-automated-medical-diagnosis-@-iccv-2021/","publishdate":"2021-08-14T00:00:00Z","relpermalink":"/post/2021-08-14-accepted-for-the-first-workshop-on-computer-vision-for-automated-medical-diagnosis-@-iccv-2021/","section":"post","summary":"Congrats to Mahbaneh!\n","tags":null,"title":"Accepted for The First Workshop on Computer Vision for Automated Medical Diagnosis @ ICCV 2021","type":"post"},{"authors":null,"categories":null,"content":"Congrats to 
Anthony!\n","date":1623024000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1623024000,"objectID":"c1432ced9dbc3ff7232c8d764ddb2f60","permalink":"http://localhost:1313/post/2021-06-07-accepted-for-miccai-2021-virtual/","publishdate":"2021-06-07T00:00:00Z","relpermalink":"/post/2021-06-07-accepted-for-miccai-2021-virtual/","section":"post","summary":"Congrats to Anthony!\n","tags":null,"title":"Accepted for MICCAI 2021, Virtual","type":"post"},{"authors":null,"categories":null,"content":"Congrats to Shibo!\n","date":1621123200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1621123200,"objectID":"d8acb15c32f13cc4f0f8af14c71afdd9","permalink":"http://localhost:1313/post/2021-05-16-accepted-for-midl-2021-virtual/","publishdate":"2021-05-16T00:00:00Z","relpermalink":"/post/2021-05-16-accepted-for-midl-2021-virtual/","section":"post","summary":"Congrats to Shibo!\n","tags":null,"title":"Accepted for MIDL 2021, Virtual","type":"post"},{"authors":null,"categories":null,"content":"Congrats to Mahbaneh!\n","date":1615420800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1615420800,"objectID":"b26f9fae26815dfb3169f2893c00549e","permalink":"http://localhost:1313/post/2021-03-11-accepted-for-aaic-2021-denver-usa/","publishdate":"2021-03-11T00:00:00Z","relpermalink":"/post/2021-03-11-accepted-for-aaic-2021-denver-usa/","section":"post","summary":"Congrats to Mahbaneh!\n","tags":null,"title":"Accepted for AAIC 2021, Denver, USA","type":"post"},{"authors":null,"categories":null,"content":"Congrats to Anthony and Xingchen!\n","date":1614384000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1614384000,"objectID":"704362a507c4fea9f73890e236b13825","permalink":"http://localhost:1313/post/2021-02-27-two-full-papers-accepted-for-isbi-2021-virtual/","publishdate":"2021-02-27T00:00:00Z","relpermalink":"/post/2021-02-27-two-full-papers-accepted-for-isbi-2021-virtual/","section":"post","summary":"Congrats to Anthony and Xingchen!\n","tags":null,"title":"Two Full Papers Accepted for ISBI 2021, Virtual","type":"post"},{"authors":null,"categories":null,"content":"","date":1612915200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1612915200,"objectID":"9254ac1edc77a4108a4fc2c6e0b1000a","permalink":"http://localhost:1313/post/2021-02-10-accepted-for-ipmi-2021-virtual/","publishdate":"2021-02-10T00:00:00Z","relpermalink":"/post/2021-02-10-accepted-for-ipmi-2021-virtual/","section":"post","summary":"","tags":null,"title":"Accepted for IPMI 2021, Virtual","type":"post"},{"authors":null,"categories":null,"content":"A bias-resilient deep learning algorithm for robust white matter hyperintensity segmentation on Alzheimer’s disease data with confounding factors\n","date":1608076800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1608076800,"objectID":"45d209567ad96fac5de130c1a736d65d","permalink":"http://localhost:1313/post/2020-12-16-received-alzheimers-disease-research-center-developmental-project-grant-by-pitt-adrc-for-2021-2022/","publishdate":"2020-12-16T00:00:00Z","relpermalink":"/post/2020-12-16-received-alzheimers-disease-research-center-developmental-project-grant-by-pitt-adrc-for-2021-2022/","section":"post","summary":"A bias-resilient deep learning algorithm for robust white matter hyperintensity segmentation on Alzheimer’s disease data with confounding factors\n","tags":null,"title":"Received Alzheimer's Disease Research Center Developmental Project grant by Pitt ADRC for 
2021-2022","type":"post"},{"authors":null,"categories":null,"content":"Congratulations to Jian Yang and Monica Hall for winning the Best Paper Award at the 2020 Conference on Wowchemy for their paper “Learning Wowchemy”.\n","date":1606867200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1606867200,"objectID":"2a0ec8a990dbd78a00c4e15a09364b00","permalink":"http://localhost:1313/post/20-12-02-icml-best-paper/","publishdate":"2020-12-02T00:00:00Z","relpermalink":"/post/20-12-02-icml-best-paper/","section":"post","summary":"Congratulations to Jian Yang and Monica Hall for winning the Best Paper Award at the 2020 Conference on Wowchemy for their paper “Learning Wowchemy”.\n","tags":null,"title":"Bye","type":"post"},{"authors":null,"categories":null,"content":"Congratulations to Richard Hendricks for winning first place in the Wowchemy Prize.\nExplain\n","date":1606780800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1606780800,"objectID":"be2bd15f022f0d83fe9ffd743881e70c","permalink":"http://localhost:1313/post/20-12-01-wowchemy-prize/","publishdate":"2020-12-01T00:00:00Z","relpermalink":"/post/20-12-01-wowchemy-prize/","section":"post","summary":"Congratulations to Richard Hendricks for winning first place in the Wowchemy Prize.\n","tags":null,"title":"Hello","type":"post"},{"authors":null,"categories":null,"content":"","date":1603411200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1603411200,"objectID":"d88e0420aec9859031ea63f15d545600","permalink":"http://localhost:1313/post/2020-10-23-accepted-for-the-anatomical-record/","publishdate":"2020-10-23T00:00:00Z","relpermalink":"/post/2020-10-23-accepted-for-the-anatomical-record/","section":"post","summary":"","tags":null,"title":"Accepted for The Anatomical Record","type":"post"},{"authors":null,"categories":null,"content":"","date":1593648000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1593648000,"objectID":"cd0f4376ad0a354d6e5cfe8a7bd97b67","permalink":"http://localhost:1313/post/2020-07-02-accepted-for-an-oral-presentation-at-the-workshop-on-bioimage-computing-@-eccv-2020/","publishdate":"2020-07-02T00:00:00Z","relpermalink":"/post/2020-07-02-accepted-for-an-oral-presentation-at-the-workshop-on-bioimage-computing-@-eccv-2020/","section":"post","summary":"","tags":null,"title":"Accepted for an Oral Presentation at The Workshop on BioImage Computing @ ECCV 2020","type":"post"},{"authors":null,"categories":null,"content":"Congratulations!\n","date":1587513600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1587513600,"objectID":"e4b691a58382c9d333f96279c4afeaee","permalink":"http://localhost:1313/post/2020-04-22-xingchen-zhao-received-the-sci-undergraduate-research-scholars-award-for-summer-2020/","publishdate":"2020-04-22T00:00:00Z","relpermalink":"/post/2020-04-22-xingchen-zhao-received-the-sci-undergraduate-research-scholars-award-for-summer-2020/","section":"post","summary":"Congratulations!\n","tags":null,"title":"Xingchen Zhao received the SCI Undergraduate Research Scholars award for Summer 2020","type":"post"},{"authors":null,"categories":null,"content":"","date":1578009600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1578009600,"objectID":"51e1a843b2b7cd5c2743d287a11a1237","permalink":"http://localhost:1313/post/2020-01-03-accepted-for-isbi-2020-iowa-city-usa/","publishdate":"2020-01-03T00:00:00Z","relpermalink":"/post/2020-01-03-accepted-for-isbi-2020-iowa-city-usa/","section":"post","summary":"","tags":null,"title":"Accepted for ISBI 
2020, Iowa City, USA","type":"post"},{"authors":null,"categories":null,"content":"","date":1571443200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1571443200,"objectID":"b44b6875b186da4bb05efe3137c52234","permalink":"http://localhost:1313/post/2019-10-19-joining-as-an-assistant-professor-in-the-department-of-computer-science-at-the-university-of-pittsburgh/","publishdate":"2019-10-19T00:00:00Z","relpermalink":"/post/2019-10-19-joining-as-an-assistant-professor-in-the-department-of-computer-science-at-the-university-of-pittsburgh/","section":"post","summary":"","tags":null,"title":"Joining as an Assistant Professor in the Department of Computer Science at the University of Pittsburgh","type":"post"},{"authors":null,"categories":null,"content":" Create your slides in Markdown - click the Slides button to check out the example. Add the publication’s full text or supplementary notes here. You can use rich formatting such as including code, math, and images.\n","date":1554595200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1554595200,"objectID":"c645ab16c36678119f3be6cebedb4a98","permalink":"http://localhost:1313/publication-dummy/preprint/","publishdate":"2017-01-01T00:00:00Z","relpermalink":"/publication-dummy/preprint/","section":"publication-dummy","summary":"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis posuere tellus ac convallis placerat. Proin tincidunt magna sed ex sollicitudin condimentum.","tags":["Source Themes"],"title":"An example preprint / working paper","type":"publication-dummy"},{"authors":["Robert Ford"],"categories":null,"content":" Click the Cite button above to demo the feature to enable visitors to import publication metadata into their reference management software. Create your slides in Markdown - click the Slides button to check out the example. Add the publication’s full text or supplementary notes here. You can use rich formatting such as including code, math, and images.\n","date":1441065600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1441065600,"objectID":"74175087caa584901180f346cddd6ef2","permalink":"http://localhost:1313/publication-dummy/journal-article/","publishdate":"2017-01-01T00:00:00Z","relpermalink":"/publication-dummy/journal-article/","section":"publication-dummy","summary":"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis posuere tellus ac convallis placerat. Proin tincidunt magna sed ex sollicitudin condimentum.","tags":["Source Themes"],"title":"An example journal article","type":"publication-dummy"},{"authors":["Robert Ford"],"categories":null,"content":" Click the Cite button above to demo the feature to enable visitors to import publication metadata into their reference management software. Create your slides in Markdown - click the Slides button to check out the example. Add the publication’s full text or supplementary notes here. 
You can use rich formatting such as including code, math, and images.\n","date":1372636800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1372636800,"objectID":"d490c8bbd639f0a713b2dbb7172bb554","permalink":"http://localhost:1313/publication-dummy/conference-paper/","publishdate":"2017-01-01T00:00:00Z","relpermalink":"/publication-dummy/conference-paper/","section":"publication-dummy","summary":".","tags":[],"title":"An example conference paper","type":"publication-dummy"},{"authors":null,"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"f26b5133c34eec1aa0a09390a36c2ade","permalink":"http://localhost:1313/admin/config.yml","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/admin/config.yml","section":"","summary":"","tags":null,"title":"","type":"decap_cms"},{"authors":null,"categories":null,"content":"\u0026lt;!DOCTYPE html\u0026gt; CoBra CoBra: Complementary Branch Fusing Class and Semantic Knowledge for Robust Weakly Supervised Semantic Segmentation code Code Overview illustration of our model, Complementary Branch (CoBra). The dual branch framework consists of the Class-Aware Knowledge branch with CNN and the Semantic-Aware Knowledge branch with ViT. They give complementary knowledge to each branch. Motivation While Class Activation Maps (CAMs) using CNNs have steadily been contributing to the success of WSSS, the resulting activation maps often narrowly focus on class-specific parts (e.g., only the face of a human). On the other hand, recent works based on vision transformers (ViT) have shown promising results based on their self-attention mechanism to capture the semantic parts but fail in capturing complete class-specific details (e.g., the entire body of a human, but also a dog nearby). The figure shows the comparison of object localization maps from the CNN, ViT, and CoBra branches for various subjects (human, dog, airplane), illustrating the distinctive areas of interest each model identifies. Our model successfully utilizes complementary characteristics to localize the exact object of the correct class and its semantic parts. Key Contribution We propose a dual branch framework, namely Complementary Branch (CoBra), which aims to fuse the complementary nature of CNN and ViT localization maps. We capture the class and semantic knowledge as Class-Aware Projection (CAP) and Semantic-Aware Projection (SAP) respectively for effective complementary guidance to the CNN and ViT branches in CoBra, employing contrastive learning for enhanced guidance. Extensive experiments qualitatively and quantitatively investigate how CNN and ViT complement each other on the PASCAL VOC 2012 dataset and MS COCO 2014 dataset, showing a state-of-the-art WSSS result. Main model Overview illustration of our model. (I) Class Aware Knowledge(CAK): The CNN outputs a feature map which generates (1) CNN CAMs via $f_{CAM}$, (2) Pseudo-Labels from CNN CAMs via $argmax$, and (3) Class-Aware Projection (CAP) via $f_{proj}$. (II) Semantic Aware Knowledge(SAK): The ViT outputs $N^2$ Patch Embeddings which generate (1) ViT CAMs via $f_{CAM}$ and (2) Semantic-Aware Projection (SAP) via $f_{proj}$. We also use the Attention Maps of all $L$-layers to generate (3) Patch Affinity of size $N^2 \\times N^2$. Method Illustration of refining CAP and SAP from the SAK and CAK branches, respectively. (I) Class Aware Knowledge(CAK): The CAP values are embedded in the Class Feature Space. 
(1) The patch affinity from the SAK branch assigns the positive (green), negative (red), and neutral (teal) patches based on the target (white) patch. (2) The CNN CAM shows that the false negative patches have been weakly localized as horse. The CAP loss pulls those weakly localized patches (i.e., false class negatives) since they are assigned as semantically positive patches based on the SAK branch. (3) The CAP is refined to improve the CNN CAM, showing fewer false class negatives. (II) Semantic Aware Knowledge(SAK): The SAP values are embedded in the Semantic Feature Space. (1) The CNN CAM from the CAK branch assigns the positive (green), negative (red), and neutral (teal) patches based on the target (white) patch. (2) The ViT CAM shows that the negative patches have been incorrectly localized as horse. The SAP loss pushes away those incorrectly localized patches (i.e., false class positives) since they are assigned as negative patches based on the CAK branch. (3) The SAP is refined to improve the ViT CAM, showing fewer false class positives. Quantitative Experiments Pascal VOC 2012 seed \u0026amp; mask results Evaluation of the initial seed and corresponding pseudo segmentation mask on the PASCAL VOC 2012 training set in mIoU (%). Pascal VOC 2012 segmentation results Semantic segmentation results on the validation (Val) and Test sets of the PASCAL VOC 2012 dataset. Sup. (Supervision): Image (I) and Saliency Map (S). MS-COCO 2014 segmentation results Segmentation mIoU results (%) on the MS-COCO 2014 val dataset. Qualitative Experiments Seed Results Qualitative results. From left: (1) Input image, (2) Our result, (3) CNN CAM of our model, (4) Ours without SAP Loss, (5) ViT CAM of our model, (6) Ours without CAP Loss, (7) Our Pseudo mask for segmentation, and (8) ground-truth segmentation label. We see that our results are able to differentiate between classes while finding their accurate object boundaries. Pascal VOC Segmentation Results Qualitative seg results on the PASCAL VOC val set. MS COCO Segmentation Results Qualitative seg results on the MS COCO val set. This website is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License. This means you are free to borrow the source code of this website, we just ask that you link back to this page in the footer. Please remember to remove the analytics code included in the header of the website which you do not want on your website. ","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"c8c40f72d166810ae9da720b8b232b38","permalink":"http://localhost:1313/cobra2024/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/cobra2024/","section":"","summary":"\u003c!DOCTYPE html\u003e CoBra CoBra: Complementary Branch Fusing Class and Semantic Knowledge for Robust Weakly Supervised Semantic Segmentation code Code Overview illustration of our model, Complementary Branch (CoBra). The dual branch framework consists of the Class-Aware Knowledge branch with CNN and the Semantic-Aware Knowledge branch with ViT.","tags":null,"title":"","type":"page"},{"authors":null,"categories":null,"content":"\u0026lt;!DOCTYPE html\u0026gt; CXRL Advancing Text-Driven Chest X-Ray Generation with Policy-Based Reinforcement Learning Early Accept @ MICCAI 2024 Woojung Han*, Chanyoung Kim*, Dayun Ju, Yumin Shim, Seong Jae Hwang Yonsei University Paper arXiv Code We introduce CXRL, Advancing Text-Driven Chest X-Ray Generation with Policy-Based Reinforcement Learning. 
Abstract Recent advances in text-conditioned image generation diffusion models have begun paving the way for new opportunities in modern medical domain, in particular, generating Chest X-rays (CXRs) from diagnostic reports. Nonetheless, to further drive the diffusion models to generate CXRs that faithfully reflect the complexity and diversity of real data, it has become evident that a nontrivial learning approach is needed. In light of this, we propose CXRL, a framework motivated by the potential of reinforcement learning (RL). Specifically, we integrate a policy gradient RL approach with well-designed multiple distinctive CXR-domain specific reward models. This approach guides the diffusion denoising trajectory, achieving precise CXR posture and pathological details. Here, considering the complex medical image environment, we present “RL with Comparative Feedback” (RLCF) for the reward mechanism, a human-like comparative evaluation that is known to be more effective and reliable in complex scenarios compared to direct evaluation. Our CXRL framework includes jointly optimizing learnable adaptive condition embeddings (ACE) and the image generator, enabling the model to produce more accurate and higher perceptual CXR quality. Our extensive evaluation of the MIMIC-CXR-JPG dataset demonstrates the effectiveness of our RL-based tuning approach. Consequently, our CXRL generates pathologically realistic CXRs, establishing a new standard for generating CXRs with high fidelity to real-world clinical scenarios. Video Method Pipeline The pipeline of CXRL. Our model employs policy gradient optimization utilizing multi-reward feedback, fine-tuning image generator, and ACE to produce realistic and accurate CXR that corresponds closely to the input report. Contribution Our study pioneers in applying RL to text-conditioned medical image synthesis, particularly in CXRs, focusing on detail refinement and input condition control for clinical accuracy. We advance report-to-CXR generation with an RLCF-based rewarding mechanism, emphasizing posture alignment, pathology accuracy, and consistency between input reports and generated CXRs. We jointly optimize the image generator and ACE via reward feedback models, ensuring image-text alignment and medical accuracy across varied reports, setting a new benchmark in a report-to-CXR generation. Reward Feedback Models A detailed illustration of our reward feedback models. We incorporate three different feedbacks for report-to-CXR generation model to generate goal-oriented CXRs. Posture Alignment Feedback: Generated CXRs often face scaling issues, like excessive zooming or rotation, obscuring essential details. To counter these undesirable effects, we introduce a reward signal to align the CXR\u0026#39;s posture with a canonical orientation to preserve essential parts. Diagnostic Condition Feedback: To accurately reflect generated CXRs with referenced pathologies, we classify them using a parsed report label, rewarding its accuracy. Multimodal Consistency Feedback: We enforce the generated CXRs to better match their reports. We leverage a multimodal latent representation pretrained with CXR-report pairs for semantic agreement assessment. Qualitative Results Comparison between previous models and ours Comparison between previous state-of-the-art report-to-CXR generation models [19,3] and ours. The blue and green texts match their corresponding colored arrows. Additional Qualitative results Additional qualitative results of our framework comparing against baselines. 
The colored texts match their corresponding colored arrows. Ours w/o ACE or RLCF demonstrates superior report agreement and posture alignment compared to other baselines. CXRL is observed to generate more advanced high-fidelity CXRs that highlight our methodology\u0026#39;s effectiveness in synthesizing clinically accurate medical images. Qualitative ablation on each reward model (a): CXRL shows significantly better alignment of the clavicle and costophrenic angle compared to the anchor regarding posture alignment. (b): CXRL demonstrates improved predictive diagnostic accuracy, closely matching the GT and enhancing clinical decision-making (c): The multimodal consistency reward ensures that CXRs and reports correspond well, as observed by arrows and text in matching colors. Evaluation of generated CXRs from multiple feedback perspectives Evaluation Metrics The table compares the performance of various methods using three evaluation metrics. CXR Quality Table Comparative analysis of generated CXR quality: (a) quantitatively compares established models using FID and MS-SSIM metrics; (b) evaluates the impact of …","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"b6ade3eb7d0d5642a41d5c4a517e32b6","permalink":"http://localhost:1313/cxrl2024/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/cxrl2024/","section":"","summary":"\u003c!DOCTYPE html\u003e CXRL Advancing Text-Driven Chest X-Ray Generation with Policy-Based Reinforcement Learning Early Accept @ MICCAI 2024 Woojung Han*, Chanyoung Kim*, Dayun Ju, Yumin Shim, Seong Jae Hwang Yonsei University Paper arXiv Code We introduce CXRL, Advancing Text-Driven Chest X-Ray Generation with Policy-Based Reinforcement Learning.","tags":null,"title":"","type":"page"},{"authors":null,"categories":null,"content":"\u0026lt;!DOCTYPE html\u0026gt; EAGLE EAGLE🦅: Eigen Aggregation Learning for Object-Centric Unsupervised Semantic Segmentation Highlight @ CVPR 2024 Chanyoung Kim*, Woojung Han*, Dayun Ju, Seong Jae Hwang Yonsei University Paper arXiv Code We introduce EAGLE, Eigen AGgregation LEarning for object-centric unsupervised semantic segmentation. Abstract Semantic segmentation has innately relied on extensive pixel-level labeled annotated data, leading to the emergence of unsupervised methodologies. Among them, leveraging self-supervised Vision Transformers for unsupervised semantic segmentation (USS) has been making steady progress with expressive deep features. Yet, for semantically segmenting images with complex objects, a predominant challenge remains: the lack of explicit object-level semantic encoding in patch-level features. This technical limitation often leads to inadequate segmentation of complex objects with diverse structures. To address this gap, we present a novel approach, EAGLE, which emphasizes object-centric representation learning for unsupervised semantic segmentation. Specifically, we introduce EiCue, a spectral technique providing semantic and structural cues through an eigenbasis derived from the semantic similarity matrix of deep image features and color affinity from an image. Further, by incorporating our object-centric contrastive loss with EiCue, we guide our model to learn object-level representations with intra- and inter-image object-feature consistency, thereby enhancing semantic accuracy. 
Extensive experiments on COCO-Stuff, Cityscapes, and Potsdam-3 datasets demonstrate the state-of-the-art USS results of EAGLE with accurate and consistent semantic segmentation across complex scenes. Video Method Pipeline The pipeline of EAGLE. Leveraging the Laplacian matrix, which integrates hierarchically projected image key features and color affinity, the model exploits eigenvector clustering to capture object-level perspective cues defined as \\( \\mathrm{\\mathcal{M}}_{eicue} \\) and \\( \\mathrm{\\tilde{\\mathcal{M}}_{eicue}} \\). Distilling knowledge from \\( \\mathrm{\\mathcal{M}}_{eicue} \\), our model further adopts an object-centric contrastive loss, utilizing the projected vectors \\( \\mathrm{Z} \\) and \\( \\mathrm{\\tilde{Z}} \\). The learnable prototype \\( \\mathrm{\\Phi} \\), assigned from \\( \\mathrm{Z} \\) and \\( \\mathrm{\\tilde{Z}} \\), acts as a singular anchor that contrasts positive objects and negative objects. Our object-centric contrastive loss is computed in two distinct manners, intra-image (\\( \\mathrm{\\mathcal{L}}_{obj} \\)) and inter-image (\\( \\mathrm{\\mathcal{L}}_{sc} \\)), to ensure semantic consistency. Eigen Aggregation Module An illustration of the EiCue generation. From the input image, both the color affinity matrix \\( \\mathrm{A_{color}} \\) and the semantic similarity matrix \\( \\mathrm{A_{seg}} \\) are derived, which are combined to form the Laplacian \\( \\mathrm{L_{sym}} \\). An eigenvector subset \\( \\mathrm{\\hat{V}} \\) of \\( \\mathrm{L_{sym}} \\) is clustered to produce EiCue. Visualization of Primary Elements Eigenvectors Visualizing eigenvectors derived from \\( \\mathrm{S} \\) in the Eigen Aggregation Module. These eigenvectors not only distinguish different objects but also identify semantically related areas, highlighting how EiCue captures object semantics and boundaries effectively. EiCue Comparison between K-means and EiCue. The bottom row presents EiCue, highlighting its superior ability to capture subtle structural intricacies and understand deeper semantic relationships, which is not as effectively achieved by K-means. Qualitative Results COCO-Stuff Qualitative results on the COCO-Stuff dataset, trained with a ViT-S/8 backbone. Cityscapes Qualitative results on the Cityscapes dataset, trained with a ViT-B/8 backbone. Potsdam-3 Qualitative results on the Potsdam-3 dataset, trained with a ViT-B/8 backbone. Quantitative Results COCO-Stuff Quantitative results on the COCO-Stuff dataset. Cityscapes Quantitative results on the Cityscapes dataset. Potsdam-3 Quantitative results on the Potsdam-3 dataset. BibTeX @InProceedings{2024eagle, author = {Kim, Chanyoung and Han, Woojung and Ju, Dayun and Hwang, Seong Jae}, title = {EAGLE: Eigen Aggregation Learning for Object-Centric Unsupervised Semantic Segmentation}, booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, month = {June}, year = {2024} } This website is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License. This means you are free to borrow the source code of this website, we just ask that you link back to this page in the footer. Please remember to remove the analytics code included in the header of the website which you do not want on your website. 
","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"eeeb8f60f540691edb50ec50a184a95a","permalink":"http://localhost:1313/eagle2024/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/eagle2024/","section":"","summary":"\u003c!DOCTYPE html\u003e EAGLE EAGLE🦅: Eigen Aggregation Learning for Object-Centric Unsupervised Semantic Segmentation Highlight @ CVPR 2024 Chanyoung Kim*, Woojung Han*, Dayun Ju, Seong Jae Hwang Yonsei University Paper arXiv Code We introduce EAGLE, Eigen AGgregation LEarning for object-centric unsupervised semantic segmentation.","tags":null,"title":"","type":"page"}]
\ No newline at end of file
diff --git a/index.xml b/index.xml
index 494f2fb..9915577 100644
--- a/index.xml
+++ b/index.xml
@@ -125,9 +125,9 @@
Slice-Consistent 3D Volumetric Brain CT-to-MRI Translation with 2D Brownian Bridge Diffusion Model
- http://localhost:1313/publication/2024-miccai-ct2mri/
+ http://localhost:1313/ct2mri2024/
Mon, 01 May 2023 00:00:00 +0000
- http://localhost:1313/publication/2024-miccai-ct2mri/
+ http://localhost:1313/ct2mri2024/
diff --git a/publication/2024-miccai-ct2mri/index.html b/publication/2024-miccai-ct2mri/index.html
index 32ab195..f5d2171 100644
--- a/publication/2024-miccai-ct2mri/index.html
+++ b/publication/2024-miccai-ct2mri/index.html
@@ -25,8 +25,8 @@
onload="this.media='all'" disabled>
-
+ href="http://localhost:1313/ct2mri2024/" />
+
@@ -37,20 +37,20 @@
+ content="http://localhost:1313/ct2mri2024/featured.png" />
-
+
+ content="http://localhost:1313/ct2mri2024/featured.png" />
+ type="application/ld+json">{ "@context": "https://schema.org", "@type": "Article", "mainEntityOfPage":{ "@type": "WebPage", "@id": "http://localhost:1313/ct2mri2024/"}, "headline": "Slice-Consistent 3D Volumetric Brain CT-to-MRI Translation with 2D Brownian Bridge Diffusion Model", "image": [ "http://localhost:1313/ct2mri2024/featured.png" ], "datePublished": "2023-05-01T00:00:00Z", "dateModified": "2023-05-01T00:00:00Z", "author":{ "@type": "Person", "name": "Kyobin Choo"}, "publisher":{ "@type": "Organization", "name": "MICV", "logo":{ "@type": "ImageObject", "url": "http://localhost:1313/media/icon_huc20aa2f6f5c0a6f78f1951b0621355e5_26767_192x192_fill_lanczos_center_3.png"}}, "description": "MICCAI 2024 (Early Accept, top 11% of submissions)"}
Slice-Consistent 3D Volumetric Brain CT-to-MRI Translation with 2D Brownian Bridge
Diffusion Model | MICV
@@ -132,7 +132,7 @@
Slice-Consistent 3D Volumetric Brain CT-to-MRI Translation with 2D Brownian