From ba2df7a888720e69cbfac8a2a151ed6f0962bb3a Mon Sep 17 00:00:00 2001
From: Shayan Monadjemi
Date: Tue, 12 Nov 2024 23:25:04 -0500
Subject: [PATCH] updated paper data

---
 _data/program_navigation.yml | 2 +-
 _layouts/homepage.html       | 8 ++++----
 program/papers.json          | 2 +-
 src/index.js                 | 3 ++-
 4 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/_data/program_navigation.yml b/_data/program_navigation.yml
index e191f7eb1..330c134b5 100644
--- a/_data/program_navigation.yml
+++ b/_data/program_navigation.yml
@@ -22,7 +22,7 @@ navbar_metadata:
   display: true
   label: 'Content'
 
-use_auth0: true
+use_auth0: false
diff --git a/_layouts/homepage.html b/_layouts/homepage.html
index 8e6de3f0f..2609a79cb 100644
--- a/_layouts/homepage.html
+++ b/_layouts/homepage.html
@@ -3,19 +3,19 @@
 ---
 
-{% include toast.html
+
-{% include alert.html
+
 {% include main-banner.html
diff --git a/program/papers.json b/program/papers.json
index 671ce76f8..9d97d3ca0 100644
--- a/program/papers.json
+++ b/program/papers.json
@@ -1 +1 @@
-[{"UID":"w-visxai-1591","abstract":"Linear algebra and matrix computations are often presented in math class as an array of inane formulas and calculations to drill and memorize. This explorable explainer attempts to present a deeper and more visual intuition behind what matrices represent. It experiments with a different kind of medium to present concepts to the reader. Animations of visuals are tied to the reader\u2019s scroll, allowing fine-grained control over more complex transitions. The piece also concludes with an interactive sandbox that readers can fiddle around with to reinforce their understanding and to challenge their intuitions. Readers can adjust the values of the input matrix even in three dimensions, and observe its result on the linear transformation on different kinds of objects \u2013 such as points in space, vectors, and even images and 3D models. 
https://yizhe-ang.github.io/matrix-explorable/","accessible_pdf":false,"authors":[{"affiliations":["National University of Singapore, Singapore, Singapore"],"email":"ang.yizhe@u.nus.edu","is_corresponding":true,"name":"Yi Zhe Ang"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-1591","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"The Matrix Arcade: A Visual Explorable of Matrix Transformations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-2024","abstract":"Though deep learning models have achieved remarkable success in diverse domains (e.g., facial recognition, autonomous driving), these models have been proven to be quite brittle to perturbations around the input data. Adversarial machine learning (AML) studies attacks that can fool machine learning models into generating incorrect outcomes as well as the defenses against worst-case attacks to strengthen model robustness. Specifically, for image classification, it is challenging to understand adversarial attacks due to their use of subtle perturbations that are not human-interpretable, as well as the variability of attack impacts influenced by attack methods, instance differences, or model architectures. 
This guide will utilize interactive visualizations to provide a non-expert introduction to adversarial attacks, and visualize the impact of FGSM attacks on two different ResNet-34 models.","accessible_pdf":false,"authors":[{"affiliations":["University of Waterloo, Waterloo, Canada"],"email":"y28you@uwaterloo.ca","is_corresponding":true,"name":"Yuzhe You"},{"affiliations":["University of Waterloo, Waterloo, Canada"],"email":"jianzhao@uwaterloo.ca","is_corresponding":false,"name":"Jian Zhao"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-2024","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/UUkftG2KH5o&t=1h31m3s","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"Panda or Gibbon? A Beginner's Introduction to Adversarial Attacks","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-2967","abstract":"Algorithmic rankers have proven to be very useful in many real-world socio-technical systems, as they assist greatly in making decisions (e.g., who to hire, who to admit). Our conversational interface, TalkToRanker, aims to empower non-expert information consumers to engage with algorithmic rankers via multi-modal conversations involving text and visualizations. We leverage explainable AI methods and the generative power of large language models (LLMs) for facilitating such conversations. 
We demonstrate the capabilities of TalkToRanker via interactive scenarios from the perspective of an admissions officer.","accessible_pdf":false,"authors":[{"affiliations":["New Jersey Institute of Technology, Newark, United States"],"email":"conor2fitzpatrick@gmail.com","is_corresponding":true,"name":"Conor Fitzpatrick"},{"affiliations":["New Jersey Institute of Technology, Newark, United States"],"email":"jy448@njit.edu","is_corresponding":false,"name":"Jun Yuan"},{"affiliations":["New Jersey Institute of Technology, Newark, United States"],"email":"dasgupta.aritra@gmail.com","is_corresponding":false,"name":"Aritra Dasgupta"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-2967","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/UUkftG2KH5o&t=0h55m17s","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"TalkToRanker: A Conversational Interface for Ranking-based Decision-Making","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-3472","abstract":"Deciphering the regulatory logic of RNA splicing, a critical process in genome function, remains a major challenge in modern biology. While various machine learning models have been proposed to address this issue, many of them fall short in terms of interpretability, unable to articulate how they arrive at their predictions. We recently introduced an interpretable machine learning model that predicts splicing outcomes based on input sequence and structure. Here, we present a series of interactive data visualization tools to illuminate the process behind the network's predictions. Specifically, we introduce visualizations that emphasize both the global and local interpretability of our model. These visualizations emphasize the clear intermediate reasoning stages of our model that trace how specific RNA features contribute to the final splicing prediction. We highlight how these visualizations can be used to explain the network\u2019s performance on prior training and validation datasets. Finally, we explore how these interactive visualizations can be harnessed to facilitate domain-specific applications, such as rational design of RNA sequences with desired splicing outcomes. 
Together, these visualizations highlight the role of data visualization and interactivity in enhancing machine learning interpretability and model adoption.","accessible_pdf":false,"authors":[{"affiliations":["New York University, New York, United States"],"email":"msa8779@nyu.edu","is_corresponding":false,"name":"Mateus Silva Aragao"},{"affiliations":["New York University, New York, United States"],"email":"sz3991@nyu.edu","is_corresponding":true,"name":"Shiwen Zhu"},{"affiliations":["New York University, New York, United States"],"email":"nhi.nguyen@nyu.edu","is_corresponding":false,"name":"Nhi Nguyen"},{"affiliations":["University of Pennsylvania, Philadelphia, United States"],"email":"garciarjr.alejandro@gmail.com","is_corresponding":false,"name":"Alejandro Garcia"},{"affiliations":["New York University, New York, United States"],"email":"sl7927@nyu.edu","is_corresponding":false,"name":"Susan Elizabeth Liao"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-3472","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/UUkftG2KH5o&t=1h41m22s","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"Inside an interpretable-by-design machine learning model: enabling RNA splicing rational design","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-3505","abstract":"Graph Neural Networks (GNNs) have gained huge success in a variety of applications, from modeling protein-protein interactions in biomedical graphs to identifying fraud in social networks. However, the complex structures of graphs and the complicated inner workings of graph neural networks make it hard for non-AI-experts to understand the essential concepts of GNNs. To address this, we present GNN 101, an educational visualization tool designed for interactive learning of GNNs. GNN 101 seamlessly integrates different levels of abstraction, including a model overview, layer operations, and detailed animations for matrix calculations, with smooth transitions between them. It offers both a node-link view and a matrix view, which complement each other. The node-link view supports an intuitive understanding of the graph structure, while the matrix view provides a space-efficient and comprehensive overview of all features and their changes across layers. GNN 101 not only reveals the computation of GNN in an engaging and intuitive way but also effectively demonstrates how node features update layer by layer through learning from their neighbors. 
It runs locally in web browsers using ONNX Runtime without additional installations or setups.","accessible_pdf":false,"authors":[{"affiliations":["University of Minnesota, Twin Cities, Minneapolis , United States"],"email":"lu000661@umn.edu","is_corresponding":false,"name":"Yilin Lu"},{"affiliations":["University of Minnesota, minneapolis, United States"],"email":"chen8596@umn.edu","is_corresponding":false,"name":"Chongwei Chen"},{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"mattx0601@gmail.com","is_corresponding":false,"name":"Matthew Xu"},{"affiliations":["University of Minnesota, Minneapolis , United States"],"email":"qianwen@umn.edu","is_corresponding":true,"name":"Qianwen Wang"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-3505","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/UUkftG2KH5o&t=1h35m47s","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"What Can a Node Learn from Its Neighbors in Graph Neural Networks?","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-3795","abstract":"The goal of this post is to build intuition around localizing information, something we naturally do to make sense of the world, and show how it can be formulated with machine learning as a route to interpretability. The long and short is that we can view the information in data as composed of specific distinctions worth making, in that these distinctions tell us the most about some other quantity we care about.","accessible_pdf":false,"authors":[{"affiliations":["University of Pennsylvania, Philadelphia, United States"],"email":"kieranm@seas.upenn.edu","is_corresponding":true,"name":"Kieran Murphy"},{"affiliations":["University of Pennsylvania, Philadelphia, United States"],"email":"dsb@seas.upenn.edu","is_corresponding":false,"name":"Dani S. 
Bassett"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-3795","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/UUkftG2KH5o&t=1h9m32s","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"Where is the information in data?","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-4284","abstract":"In this article, we present several key concepts about empirical neural network robustness, including PGD attack, adversarial training, and accuracy-robustness tradeoff, with interactive visualizations.","accessible_pdf":false,"authors":[{"affiliations":["University of Maryland, College Park, United States"],"email":"cchen24@umd.edu","is_corresponding":true,"name":"Chen Chen"},{"affiliations":["Arizona state university, Tempe, United States"],"email":"jhuan196@asu.edu","is_corresponding":false,"name":"Jinbin Huang"},{"affiliations":["University of Maryland, College Park, United States"],"email":"eremsber@terpmail.umd.edu","is_corresponding":false,"name":"Ethan M Remsberg"},{"affiliations":["University of Maryland, College Park, United States"],"email":"leozcliu@umd.edu","is_corresponding":false,"name":"Zhicheng Liu"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-4284","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/UUkftG2KH5o&t=1h26m6s","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"A Visual Tour to Empirical Neural Network Robustness","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-4395","abstract":"Large Language Models (LLMs) have revolutionized machine learning and natural language processing, demonstrating remarkable versatility across various tasks. 
Despite their advancements, their application in critical fields is hindered by a lack of effective interpretability and explainability. In our company, we have fine-tuned a text-to-command conversational AI model that translates natural language inputs into executable network commands. This paper presents our findings on explaining the model\u2019s reasoning processes, aiming to enhance understanding, identify biases, and improve performance. We explore techniques such as token attributions, hidden state visualizations, neuron activation, and attention mechanisms to elucidate model behavior. Our work contributes to the development of more interpretable and trustworthy AI systems, pushing the boundaries of conversational AI.","accessible_pdf":false,"authors":[{"affiliations":["Cisco Systems , Rolle, Switzerland"],"email":"p.stupar@outlook.com","is_corresponding":true,"name":"Petar Stupar"},{"affiliations":["HES-SO, Sion, Switzerland"],"email":"gregory.mermoud@proton.me","is_corresponding":false,"name":"Gregory Mermoud"},{"affiliations":["Cisco Systems, Pairs, France"],"email":"jpvasseur22@gmail.com","is_corresponding":false,"name":"Jean-Philippe Vasseur"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-4395","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/UUkftG2KH5o&t=0h47m29s","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"Explaining Text-to-Command Conversational Models","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-5402","abstract":"","accessible_pdf":false,"authors":[{"affiliations":["Google Research, Cambridge, United States"],"email":"nadahussein@google.com","is_corresponding":true,"name":"Nada Hussein"},{"affiliations":["Google Research, New York, United States"],"email":"asma_gh@mit.edu","is_corresponding":false,"name":"Asma Ghandeharioun"},{"affiliations":["Google Research, Cambridge, United States"],"email":"ryanmullins@google.com","is_corresponding":false,"name":"Ryan Mullins"},{"affiliations":["Google, Cambridge, United States"],"email":"ereif@google.com","is_corresponding":false,"name":"Emily Reif"},{"affiliations":["Google Research, Mountain View, United States"],"email":"jimbo@google.com","is_corresponding":false,"name":"Jimbo Wilson"},{"affiliations":["Google, Montreal, Canada"],"email":"nthain@google.com","is_corresponding":false,"name":"Nithum Thain"},{"affiliations":["Google, Paris, France"],"email":"ldixon@google.com","is_corresponding":false,"name":"Lucas Dixon"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI 
Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-5402","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/UUkftG2KH5o&t=1h16m13s","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"Can Large Language Models Explain Their Internal Mechanisms?","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-6211","abstract":"Do you want to understand exactly how AlphaFold3 works? The architecture is quite complicated and the description in the paper can be overwhelming, so we made a much more accessible (but just as detailed!) visual walkthrough. There are already many great explanations of the motivation for protein structure prediction, the CASP competition, model failure modes, debates about evaluations, implications for biotech, etc. so we don\u2019t focus on any of that. Instead we explore the how. How are these molecules represented in the model and what are all of the operations that convert their sequences into a predicted structure? As we walk through every step of this process, we explain 30 algorithms in ~40 clear diagrams, then share some thoughts on how they fit into the broader landscape of ML trends.","accessible_pdf":false,"authors":[{"affiliations":["Stanford University, Palo Alto, United States"],"email":"elanasimon95@gmail.com","is_corresponding":false,"name":"Elana P Simon"},{"affiliations":["Stanford, Stanford, United States"],"email":"jsilberg@stanford.edu","is_corresponding":false,"name":"Jake Silberg"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-6211","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/UUkftG2KH5o&t=1h20m14s","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"The Illustrated AlphaFold","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-6324","abstract":"Prompt engineering is an emerging field where researchers are discovering 
new patterns of communication between humans and large language models. Powerful new abstractions like few-shot examples, tool use and reflection give prompt engineers the ability to create increasingly complex tasks for language models to solve while also opening up opportunities to visualize large prompts more succinctly. ExplainPrompt is a AI visualization project which is mapping out this new language of prompts and distilling them down into a clear and simple visualization style for prompt engineering.","accessible_pdf":false,"authors":[{"affiliations":["GitHub, San Francisco, United States"],"email":"narphorium@gmail.com","is_corresponding":true,"name":"Shawn Simister"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-6324","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"ExplainPrompt: Decoding the language of AI prompts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-9042","abstract":"Transformers, initially designed for Natural Language Processing, have emerged as a strong alternative to Convolutional Neural Networks in Computer Vision. However, their interpretability remains challenging. We overcome the limitations of earlier studies by offering interactive components, engaging the user in the exploration of the Vision Transformer (ViT). Furthermore, we offer various complementary explainability methods to challenge the insight they provide. Key contributions include: - Interactive analysis of the ViT architecture and explainability methods. - Identifying critical information from input images used for classification. - Investigating neuron activations at various depths to understand learned features. - Introducing an innovative adaptation of activation maximization for attention scores to trace attention head focus across network layers. - Highlighting the limitations of each method through occlusion-based interaction. Our findings include that ViTs tend to generalize well by relying on a broad set of object features and contexts seen in the input image. Furthermore, the focus of neurons and attention heads shifts to more complex patterns at deeper layers. We also acknowledge that we cannot rely on a single explainability method to understand the decision-making process of transformers. 
Our blog post provides an engaging and multi-facetted interpretation of the ViT to the readers by combining interactivity with key research questions.","accessible_pdf":false,"authors":[{"affiliations":["ETH Zurich, Z\u00fcrich, Switzerland"],"email":"anmarx@student.ethz.ch","is_corresponding":false,"name":"Anne Marx"},{"affiliations":["Eth Zurich , Z\u00fcrich, Switzerland"],"email":"yumikimi381@gmail.com","is_corresponding":false,"name":"Yumi Kim"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"luca.sichi@hotmail.com","is_corresponding":false,"name":"Luca Sichi"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"diego.arapovic@gmail.com","is_corresponding":false,"name":"Diego Arapovic"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland","ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"jsanguino@student.ethz.ch","is_corresponding":false,"name":"Javier Sanguino Bautiste"},{"affiliations":["ETH, Zurich, Switzerland","ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"rita.sevastjanova@inf.ethz.ch","is_corresponding":false,"name":"Rita Sevastjanova"},{"affiliations":["ETH Zurich, Zurich, Switzerland","ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"melassady@ai.ethz.ch","is_corresponding":false,"name":"Mennatallah El-Assady"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-9042","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/UUkftG2KH5o&t=1h4m4s","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"Explainability Perspectives on a Vision Transformer: From Global Architecture to Single Neuron","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-topoinvis-1027","abstract":"Advances in high-performance computing require new ways to represent large-scale scientific data to support data storage, data transfers, and data analysis within scientific workflows. Multivariate functional approximation (MFA) has recently emerged as a new continuous meshless representation that approximates raw discrete data with a set of piecewise smooth functions. An MFA model of data thus offers a compact representation and supports high-order evaluation of values and derivatives anywhere in the domain. In this paper, we present CPE-MFA, the first critical point extraction framework designed for MFA models of large-scale, high-dimensional data. CPE-MFA extracts critical points directly from an MFA model without the need for discretization or resampling. 
This is the first step toward enabling continuous implicit models such as MFA to support topological data analysis at scale.","accessible_pdf":true,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"guanqunma94@gmail.com","is_corresponding":true,"name":"Guanqun Ma"},{"affiliations":["Argonne National Laboratory, Lemont, United States"],"email":"dlenz@anl.gov","is_corresponding":false,"name":"David Lenz"},{"affiliations":["Argonne National Laboratory, Lemont, United States"],"email":"tpeterka@mcs.anl.gov","is_corresponding":false,"name":"Tom Peterka"},{"affiliations":["The Ohio State University, Columbus, United States"],"email":"guo.2154@osu.edu","is_corresponding":false,"name":"Hanqi Guo"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"wang.bei@gmail.com","is_corresponding":false,"name":"Bei Wang"}],"award":"","doi":"","event_id":"w-topoinvis","event_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-topoinvis-1027","image_caption":"Critical points identified by CPE-MFA and TTK-MFA. CPE-MFA: our method in a continuous domain. TTK-MFA: a discrete approach implemented in the topology tool kit. Yellow means the perfect alignment between CPE-MFA and TTK-MFA. Purple represents the critical points from TTK-MFA. Pink represents the critical points from CPE-MFA.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2408.13193","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-topoinvis/w-topoinvis-1027/w-topoinvis-1027_Preview.mp4?token=qfN8QlF5-hfBnFx-u_zUaqlRj9AvuCMVLamZhK9eq_0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-topoinvis/w-topoinvis-1027/w-topoinvis-1027_Preview.srt?token=lA4MEqtRmHXqAZxPCngtyJcdlU8tEXCX1OxWyYFcCHM&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated3","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","session_uid":"w-topoinvis","session_youtube_ff_id":"-J4QrJ3FOSA","session_youtube_ff_link":"https://youtu.be/-J4QrJ3FOSA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["TopoInVis: Workshop on Topological Data Analysis and Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Critical Point Extraction from Multivariate Functional Approximation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-topoinvis-1031","abstract":"3D symmetric tensor fields have a wide range of applications in science and engineering. The topology of such fields can provide critical insight into not only the structures in tensor fields but also their respective applications. Existing research focuses on the extraction of topological features such as degenerate curves and neutral surfaces. In this paper, we investigate the asymptotic behaviors of these topological features in the sphere of infinity. 
Our research leads to both theoretical analysis and observations that can aid further classifications of tensor field topology.","accessible_pdf":false,"authors":[{"affiliations":["Oregon State University, Corvallis, United States"],"email":"linxinw@oregonstate.edu","is_corresponding":false,"name":"Xinwei Lin"},{"affiliations":["Oregon State University, Corvallis, United States"],"email":"zhangyue@oregonstate.edu","is_corresponding":true,"name":"Yue Zhang"},{"affiliations":["Oregon State University, Corvallis, United States"],"email":"zhange@eecs.oregonstate.edu","is_corresponding":false,"name":"Eugene Zhang"}],"award":"","doi":"","event_id":"w-topoinvis","event_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-topoinvis-1031","image_caption":"The asymptotic behaviors of a 3D linear tensor field can be understood by the tensor mode function on the sphere of infinity.In this figure, we show the four topologically different cases: (a) two degenerate curves and the neutral surface with one boundary, (b) two degenerate curves and the neutral surface with three boundaries, (c) four degenerate curves and the neutral surface with one boundary, and (d) four degenerate curves and the neutral surface with three boundaries.In each of these cases, the degenerate curves intersect the sphere of infinity at the global maxima (yellow dots) and global minima (green dots) of the tensor mode function. Similarly, the neutral surface intersects the sphere of infinity at precisely the zeroth level set of the mode function. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated3","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","session_uid":"w-topoinvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["TopoInVis: Workshop on Topological Data Analysis and Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Asymptotic Topology of 3D Linear Symmetric Tensor Fields","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-topoinvis-1033","abstract":"Jacobi sets are an important method to investigate the relationship between Morse functions. The Jacobi set for two Morse functions is the set of all points where the functions' gradients are linearly dependent. Both the segmentation of the domain by Jacobi sets and the Jacobi sets themselves have proven to be useful tools in multi-field visualization, data analysis in various applications, and for accelerating extraction algorithms. On a triangulated grid, they can be calculated by a piecewise linear interpolation. In practice, Jacobi sets can become very complex and large due to noise and numerical errors. Some techniques for simplifying Jacobi sets exist, but these only reduce individual elements such as noise or are purely theoretical. These techniques often only change the visual representation of the Jacobi sets, but not the underlying data. 
In this paper, we present an algorithm that simplifies the Jacobi sets for 2D bivariate scalar fields and at the same time modifies the underlying bivariate scalar fields while preserving the essential structures of the fields. We use a neighborhood graph to select the areas to be reduced and collapse these cells individually. We investigate the influence of different neighborhood graphs and present an adaptation for the visualization of Jacobi sets that take the collapsed cells into account. We apply our algorithm to a range of analytical and real-world data sets and compare it with established methods that also simplify the underlying bivariate scalar fields.","accessible_pdf":false,"authors":[{"affiliations":["Leipzig University, Leipzig, Germany"],"email":"raith@informatik.uni-leipzig.de","is_corresponding":true,"name":"Felix Raith"},{"affiliations":["Leipzig University, Leipzig, Germany"],"email":"scheuermann@informatik.uni-leipzig.de","is_corresponding":false,"name":"Gerik Scheuermann"},{"affiliations":["Leipzig University, Leipzig, Germany"],"email":"heine@informatik.uni-leipzig.de","is_corresponding":false,"name":"Christian Heine"}],"award":"","doi":"","event_id":"w-topoinvis","event_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-topoinvis-1033","image_caption":"Comparison of the calculated Jacobi sets in the Cylinder Flow dataset on the left side of the figure for the original dataset in the upper figure before simplification and the dataset in the lower figure after simplification with the collapse algorithm with threshold t = 0.0001. Furthermore, the corresponding neighborhood graphs are displayed on the right side. In this figure, the color corresponds to the orientation, red, positive orientation (det \u2207f(x) > 0), and blue, negative orientation (det \u2207f(x) < 0). The saturation indicates the range area. High saturation means a large range area, and vice versa for low saturation. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2408.08097","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-topoinvis/w-topoinvis-1033/w-topoinvis-1033_Preview.mp4?token=pfMUjDzu2of7xxTP2BY9WDnTHYf8QGffCYIbJZ3DkIY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-topoinvis/w-topoinvis-1033/w-topoinvis-1033_Preview.srt?token=RVqJGkHIIu7_Z84jZuTwdgvtrIlkMrEVatymxFiTaU0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated3","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","session_uid":"w-topoinvis","session_youtube_ff_id":"4KyGneBGdlY","session_youtube_ff_link":"https://youtu.be/4KyGneBGdlY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["TopoInVis: Workshop on Topological Data Analysis and Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Topological Simplifcation of Jacobi Sets for Piecewise-Linear Bivariate 2D Scalar Fields","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-topoinvis-1034","abstract":"The Morse-Smale complex is a standard tool in visual data analysis. 
The classic definition is based on a continuous view of the gradient of a scalar function where its zeros are the critical points. These points are connected via gradient curves and surfaces emanating from saddle points, known as separatrices. In a discrete setting, the Morse-Smale complex is commonly extracted by constructing a combinatorial gradient assuming the steepest descent direction. Previous works have shown that this method results in a geometric embedding of the separatrices that can be fundamentally different from those in the continuous case. To achieve a similar embedding, different approaches for constructing a combinatorial gradient were proposed. In this paper, we show that these approaches generate a different topology, i.e., the connectivity between critical points changes. Additionally, we demonstrate that the steepest descent method can compute topologically and geometrically accurate Morse-Smale complexes when applied to certain types of grids. Based on these observations, we suggest a method to attain both geometric and topological accuracy for the Morse-Smale complex of data sampled on a uniform grid.","accessible_pdf":false,"authors":[{"affiliations":["KTH Royal Institute of Technology, Stockholm, Sweden"],"email":"sonlt@kth.se","is_corresponding":true,"name":"Son Le Thanh"},{"affiliations":["KTH Royal Institute of Technology, Stockholm, Sweden"],"email":"ankele@iai.uni-bonn.de","is_corresponding":false,"name":"Michael Ankele"},{"affiliations":["KTH Royal Institute of Technology, Stockholm, Sweden"],"email":"weinkauf@kth.se","is_corresponding":false,"name":"Tino Weinkauf"}],"award":"","doi":"","event_id":"w-topoinvis","event_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-topoinvis-1034","image_caption":"Shown is the Morse-Smale complex of an analytic function representing a circle engraved in a tilted plane. It can be computed using the provably correct steepest descent method as shown by the orange lines. This method struggles to produce a geometric embedding similar to that of continuous topology, i.e. the circular shape. Although several approaches have been proposed to address this issue, in this paper, we show systematically that they generate different topologies. 
We show that geometrical and topological accuracy can be achieved by applying the steepest descent method on a modified grid structure, illustrated by the white lines.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2409.05532","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated3","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","session_uid":"w-topoinvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["TopoInVis: Workshop on Topological Data Analysis and Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Revisiting Accurate Geometry for the Morse-Smale Complexes","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-topoinvis-1038","abstract":"This paper presents a nested tracking framework for analyzing cycles in 2D force networks within granular materials. These materials are composed of interacting particles, whose interactions are described by a force network. Understanding the cycles within these networks at various scales and their evolution under external loads is crucial, as they significantly contribute to the mechanical and kinematic properties of the system. Our approach involves computing a cycle hierarchy by partitioning the 2D domain into regions bounded by cycles in the force network. We can adapt concepts from nested tracking graphs originally developed for merge trees by leveraging the duality between this partitioning and the cycles. We demonstrate the effectiveness of our method on two force networks derived from experiments with photo-elastic disks.","accessible_pdf":true,"authors":[{"affiliations":["Link\u00f6ping University, Link\u00f6ping, Sweden"],"email":"farhan.rasheed@liu.se","is_corresponding":true,"name":"Farhan Rasheed"},{"affiliations":["Indian Institute of Science, Bangalore, India"],"email":"abrarnaseer@iisc.ac.in","is_corresponding":false,"name":"Abrar Naseer"},{"affiliations":["Link\u00f6ping university, Norrk\u00f6ping, Sweden"],"email":"emma.nilsson@liu.se","is_corresponding":false,"name":"Emma Nilsson"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"talha.bin.masood@liu.se","is_corresponding":false,"name":"Talha Bin Masood"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"ingrid.hotz@liu.se","is_corresponding":false,"name":"Ingrid Hotz"}],"award":"","doi":"","event_id":"w-topoinvis","event_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-topoinvis-1038","image_caption":"A tracking graph illustrating the development of cycles in a dynamic planar graph. Each column corresponds to a specific time point, with the nodes in the each column corresponding to a region encloues by a cycle in the partitioning of the underlying domain (shown at bottom). 
The color highlights the local development of the spatial system.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2409.06476","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-topoinvis/w-topoinvis-1038/w-topoinvis-1038_Preview.mp4?token=ojyOF4m7qIFNwHfc3R2ZTN-NE8U_X12IDfVrSV0g9QE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-topoinvis/w-topoinvis-1038/w-topoinvis-1038_Preview.srt?token=IBiN8r08yS_XEtLFwQrAHLdQeEhgWj19vwFJvCtPvZo&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated3","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","session_uid":"w-topoinvis","session_youtube_ff_id":"DhRmBk_dTns","session_youtube_ff_link":"https://youtu.be/DhRmBk_dTns","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["TopoInVis: Workshop on Topological Data Analysis and Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Multi-scale Cycle Tracking in Dynamic Planar Graphs","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-topoinvis-1041","abstract":"Tetrahedral meshes are widely used due to their flexibility and adaptability in representing changes of complex geometries and topology. However, most existing data structures struggle to efficiently encode the irregular connectivity of tetrahedral meshes with billions of vertices.We address this problem by proposing a novel framework for efficient and scalable analysis of large tetrahedral meshes using Apache Spark. The proposed framework, called Tetra-Spark, features optimized approaches to locally compute many connectivity relations by first retrieving the Vertex-Tetrahedron (VT) relation. This strategy significantly improves Tetra-Spark's efficiency in performing morphology computations on large tetrahedral meshes.To prove the effectiveness and scalability of such a framework, we conduct a comprehensive comparison against a vanilla Spark implementation for the analysis of tetrahedral meshes. Our experimental evaluation shows that Tetra-Spark achieves up to a 78x speedup and reduces memory usage by up to 80% when retrieving connectivity relations with the VT relation available. 
This optimized design further accelerates subsequent morphology computations, resulting in up to a 47.7x speedup.","accessible_pdf":true,"authors":[{"affiliations":["University of Maryland, College Park, College Park, United States"],"email":"yhqian@umd.edu","is_corresponding":true,"name":"Yuehui Qian"},{"affiliations":["Clemson University, Clemson, United States"],"email":"guoxil@clemson.edu","is_corresponding":false,"name":"Guoxi Liu"},{"affiliations":["Clemson University, Clemson, United States"],"email":"fiurici@clemson.edu","is_corresponding":false,"name":"Federico Iuricich"},{"affiliations":["University of Maryland, College Park, United States"],"email":"deflo@umiacs.umd.edu","is_corresponding":false,"name":"Leila De Floriani"}],"award":"","doi":"","event_id":"w-topoinvis","event_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-topoinvis-1041","image_caption":"Figure: (a) The time cost (in minutes) for extracting connectivity relations and executing the algorithm in computing Forman gradient. (b) The peak memory consumption (in GB) for extracting relations. (c) The peak memory usage (in GB) for the entire computation.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated3","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","session_uid":"w-topoinvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["TopoInVis: Workshop on Topological Data Analysis and Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Efficient representation and analysis for a large tetrahedral mesh using Apache Spark","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-ldav-1002","abstract":"Cuneiform is the earliest known system of writing, first developed for the Sumerian language of southern Mesopotamia in the second half of the 4th millennium BC. Cuneiform signs are obtained by impressing a stylus on fresh clay tablets. For certain purposes, e.g. authentication by seal imprint, some cuneiform tablets were enclosed in clay envelopes, which cannot be opened without destroying them. The aim of our interdisciplinary project is the non-invasive study of clay tablets. A portable X-ray micro-CT scanner is developed to acquire density data of such artifacts on a high-resolution, regular 3D grid at collection sites. The resulting volume data is processed through feature-preserving denoising, extraction of high-accuracy surfaces using a manifold dual marching cubes algorithm and extraction of local features by enhanced curvature rendering and ambient occlusion. For the non-invasive study of cuneiform inscriptions, the tablet is virtually separated from its envelope by curvature-based segmentation. The computational- and data-intensive algorithms are optimized for near-real-time offline usage with limited resources at collection sites. 
To visualize the complexity-reduced and octree-based compressed representation of surfaces, we develop and implement an interactive application. To facilitate the analysis of such clay tablets, we implement shape-based feature extraction algorithms to enhance cuneiform recognition. Our workflow supports innovative 3D display and interaction techniques such as autostereoscopic displays and gesture control.","accessible_pdf":true,"authors":[{"affiliations":["Universit\u00e4t Hamburg, Hamburg, Germany"],"email":"stephan.olbrich@uni-hamburg.de","is_corresponding":false,"name":"Stephan Olbrich"},{"affiliations":["Universit\u00e4t Hamburg, Hamburg, Germany"],"email":"andreas.beckert@uni-hamburg.de","is_corresponding":true,"name":"Andreas Beckert"},{"affiliations":["Centre National de la Recherche Scientifique (CNRS), Nanterre, France"],"email":"cecile.michel@cnrs.fr","is_corresponding":false,"name":"C\u00e9cile Michel"},{"affiliations":["Deutsches Elektronen-Synchrotron (DESY), Hamburg, Germany","Universit\u00e4t Hamburg, Hamburg, Germany"],"email":"christian.schroer@desy.de","is_corresponding":false,"name":"Christian Schroer"},{"affiliations":["Deutsches Elektronen-Synchrotron (DESY), Hamburg, Germany","Universit\u00e4t Hamburg, Hamburg, Germany"],"email":"samaneh.ehteram@desy.de","is_corresponding":false,"name":"Samaneh Ehteram"},{"affiliations":["Deutsches Elektronen-Synchrotron (DESY), Hamburg, Germany"],"email":"andreas.schropp@desy.de","is_corresponding":false,"name":"Andreas Schropp"},{"affiliations":["Deutsches Elektronen-Synchrotron (DESY), Hamburg, Germany"],"email":"philipp.paetzold@desy.de","is_corresponding":false,"name":"Philipp Paetzold"}],"award":"","doi":"","event_id":"a-ldav","event_title":"LDAV: 13th IEEE Symposium on Large Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"a-ldav-1002","image_caption":"Virtual unpacking of an ancient clay tablet enclosed in another layer of clay. The surfaces are reconstructed from computed tomography data, which are acquired using a specially designed instrument developed for this purpose. The rendering of the reconstructed surfaces is refined with features such as enhanced curvature and ambient occlusion.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"http://arxiv.org/abs/2409.04236","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated4","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization","session_uid":"a-ldav","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization"],"time_stamp":"2024-10-13T16:00:00Z","title":"Efficient Analysis and Visualization of High-Resolution Computed Tomography Data for the Exploration of Enclosed Cuneiform Tablets","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-ldav-1003","abstract":"Dimensionality reduction (DR) is a well-established approach for the visualization of high-dimensional data sets. 
While DR methods are often applied to typical DR benchmark data sets in the literature, they might suffer from high runtime complexity and memory requirements, making them unsuitable for large data visualization especially in environments outside of high-performance computing. To perform DR on large data sets, we propose the use of out-of-sample extensions.Such extensions allow inserting new data into existing projections, which we leverage to iteratively project data into a reference projection that consists only of a small manageable subset. This process makes it possible to perform DR out-of-core on large data, which would otherwise not be possible due to memory and runtime limitations. For metric multidimensional scaling (MDS), we contribute an implementation with out-of-sample projection capability since typical software libraries do not support it. We provide an evaluation of the projection quality of five common DR algorithms (MDS, PCA, t-SNE, UMAP, and autoencoders) using quality metrics from the literature and analyze the trade-off between the size of the reference set and projection quality. The runtime behavior of the algorithms is also quantified with respect to reference set size, out-of-sample batch size, and dimensionality of the data sets. Furthermore, we compare the out-of-sample approach to other recently introduced DR methods, such as PaCMAP and TriMAP, which claim to handle larger data sets than traditional approaches. To showcase the usefulness of DR on this large scale, we contribute a use case where we analyze ensembles of streamlines amounting to one billion projected instances.","accessible_pdf":false,"authors":[{"affiliations":["Universit\u00e4t Stuttgart, Stuttgart, Germany"],"email":"lucareichmann01@gmail.com","is_corresponding":false,"name":"Luca Marcel Reichmann"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"david.haegele@visus.uni-stuttgart.de","is_corresponding":false,"name":"David H\u00e4gele"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"weiskopf@visus.uni-stuttgart.de","is_corresponding":false,"name":"Daniel Weiskopf"}],"award":"","doi":"","event_id":"a-ldav","event_title":"LDAV: 13th IEEE Symposium on Large Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"a-ldav-1003","image_caption":"The projections show the results of dimensionality reduction using the out-of-sample approach with data sets containing up to 50 million data points. In each column, the sizes of the reference set are increased. The size used for creating the initial reference projection are shown by the number above each plot. We show the results for popular dimensionality reduction techniques: MDS, PCA, t-SNE, UMAP, and autoencoder. 
The projections are evaluated using various quality metrics.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"https://arxiv.org/abs/2408.04129v1","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/a-ldav/a-ldav-1003/a-ldav-1003_Preview.mp4?token=PYJw_loXL4L1qAH_hlEUHMBLIR9img4Wuf2hw1VfGqw&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/a-ldav/a-ldav-1003/a-ldav-1003_Preview.srt?token=wztCbEzC5jXSgn8_YRdZ6ycRd17DxhBRfIfdjynStm0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated4","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization","session_uid":"a-ldav","session_youtube_ff_id":"Xm6e1eW5DxA","session_youtube_ff_link":"https://youtu.be/Xm6e1eW5DxA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization"],"time_stamp":"2024-10-13T16:00:00Z","title":"Out-of-Core Dimensionality Reduction for Large Data via Out-of-Sample Extensions","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-ldav-1006","abstract":"Scientists generate petabytes of data daily to help uncover environmental trends or behaviors that are hard to predict. For example, understanding climate simulations based on the long-term average of temperature, precipitation, and other environmental variables is essential to predicting and establishing root causes of future undesirable scenarios and assessing possible mitigation strategies. Unfortunately, bottlenecks in petascale workflows restrict scientists' ability to analyze and visualize the necessary information due to requirements for extensive computational resources, obstacles in data accessibility, and inefficient analysis algorithms. This paper presents an approach to managing, visualizing, and analyzing petabytes of data within a browser on equipment ranging from the top NASA supercomputer to commodity hardware like a laptop. Our approach is based on a novel data fabric abstraction layer that allows querying scientific information in a form that is user-friendly while hiding the complexities of dealing with file systems or cloud services. We also optimize network utilization while streaming from petascale repositories through state-of-the-art progressive compression algorithms. Based on this abstraction, we provide customizable dashboards that can be accessed from any device with an internet connection, offering straightforward access to vast amounts of data typically not available to those without access to uniquely expensive hardware resources. Our dashboards provide and improve the ability to access and, more importantly, use massive data for a wide range of users, from top scientists with access to leadership-class computing environments to undergraduate students of disadvantaged backgrounds from minority-serving institutions. We focus on NASA's use of petascale climate datasets as an example of particular societal impact and, therefore, a case where achieving equity in science participation is critical. 
In particular, we validate our approach by improving the ability of climate scientists to explore their data even on the top NASA supercomputer, introducing the ability to study their data in a fully interactive environment instead of being limited to using pre-choreographed videos that can each take days to generate. We also successfully introduced the same dashboards and simplified training material in an undergraduate class on Geospatial Analysis at a minority-serving campus (Utah State University Blanding) with 69% of the students being Native American and 86% being low-income. The same dashboards are also released in simplified form to the general public, providing an unparalleled democratization of the access and use of climate data that can be extended to most scientific domains.","accessible_pdf":true,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"aashishpanta0@gmail.com","is_corresponding":true,"name":"Aashish Panta"},{"affiliations":["Scientific Computing and Imaging Institute, Salt Lake City, United States"],"email":"xuanhuang@sci.utah.edu","is_corresponding":false,"name":"Xuan Huang"},{"affiliations":["NASA Ames Research Center, Mountain View, United States"],"email":"nina.mccurdy@gmail.com","is_corresponding":false,"name":"Nina McCurdy"},{"affiliations":["NASA, mountain View, United States"],"email":"david.ellsworth@nasa.gov","is_corresponding":false,"name":"David Ellsworth"},{"affiliations":["university of Utah, Salt lake city, United States"],"email":"amy.a.gooch@gmail.com","is_corresponding":false,"name":"Amy Gooch"},{"affiliations":["university of Utah, Salt lake city, United States"],"email":"scrgiorgio@gmail.com","is_corresponding":false,"name":"Giorgio Scorzelli"},{"affiliations":["NASA, Pasadena, United States"],"email":"hector.torres.gutierrez@jpl.nasa.gov","is_corresponding":false,"name":"Hector Torres"},{"affiliations":["caltech, Pasadena, United States"],"email":"pklein@caltech.edu","is_corresponding":false,"name":"Patrice Klein"},{"affiliations":["Utah State University Blanding, Blanding, United States"],"email":"gustavo.ovando@usu.edu","is_corresponding":false,"name":"Gustavo Ovando-Montejo"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"pascucci.valerio@gmail.com","is_corresponding":false,"name":"Valerio Pascucci"}],"award":"","doi":"","event_id":"a-ldav","event_title":"LDAV: 13th IEEE Symposium on Large Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"a-ldav-1006","image_caption":"We provide unprecedented equitable access to massive data via our novel data fabric abstraction enabled by dashboards on commodity desktop computers with a simple weblink for everyone from top NASA scientists to students in disadvantaged communities to the general public. This image shows a field called Eastward Wind Velocity (U), combined together from a cubed-sphere grid. 
","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"https://arxiv.org/abs/2408.11831v1","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated4","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization","session_uid":"a-ldav","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization"],"time_stamp":"2024-10-13T16:00:00Z","title":"Web-based Visualization and Analytics of Petascale data: Equity as a Tide that Lifts All Boats","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-ldav-1011","abstract":"This paper describes the adaptation of a well-scaling parallel algorithm for computing Morse-Smale segmentations based on path compression to a distributed computational setting. Additionally, we extend the algorithm to efficiently compute connected components in distributed structured and unstructured grids, based either on the connectivity of the underlying mesh or a feature mask. Our implementation is seamlessly integrated with the distributed extension of the Topology ToolKit (TTK), ensuring robust performance and scalability. To demonstrate the practicality and efficiency of our algorithms, we conducted a series of scaling experiments on large-scale datasets, with sizes of up to 4096^3 vertices on up to 64 nodes and 768 cores.","accessible_pdf":false,"authors":[{"affiliations":["RPTU Kaiserslautern-Landau, Kaiserslautern, Germany"],"email":"mswill@rhrk.uni-kl.de","is_corresponding":true,"name":"Michael Will"},{"affiliations":["RPTU Kaiserslautern-Landau, Kaiserslautern, Germany"],"email":"jl@jluk.de","is_corresponding":false,"name":"Jonas Lukasczyk"},{"affiliations":["CNRS, Paris, France","Sorbonne Universit\u00e9, Paris, France"],"email":"julien.tierny@sorbonne-universite.fr","is_corresponding":false,"name":"Julien Tierny"},{"affiliations":["RPTU Kaiserslautern-Landau, Kaiserslautern, Germany"],"email":"garth@rptu.de","is_corresponding":false,"name":"Christoph Garth"}],"award":"","doi":"","event_id":"a-ldav","event_title":"LDAV: 13th IEEE Symposium on Large Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"a-ldav-1011","image_caption":"Left: an illustration of using path compression to quickly compute the ascending / descending segmentations. Right: Illustrating the use of Connected Component extraction for data segmentation. 
Running these computations on multiple nodes allows us to use much larger datasets by using the distributed memory of all the nodes.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/a-ldav/a-ldav-1011/a-ldav-1011_Preview.mp4?token=YIVcce37MaN-COCzsAbFkZ34Fu6IKgmMjjUjQIjzuRA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/a-ldav/a-ldav-1011/a-ldav-1011_Preview.srt?token=v3vbBEIZLjIo9y9Trcw7LtJm2WnYO6W32DFzV3Q5W1Q&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated4","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization","session_uid":"a-ldav","session_youtube_ff_id":"_9KgKT3__LM","session_youtube_ff_link":"https://youtu.be/_9KgKT3__LM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization"],"time_stamp":"2024-10-13T16:00:00Z","title":"Distributed Path Compression for Piecewise Linear Morse-Smale Segmentations and Connected Components","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-ldav-1016","abstract":"We propose and discuss a paradigm that allows for expressing data- parallel rendering with the classically non-parallel ANARI API. We propose this as a new standard for data-parallel rendering, describe two different implementations of this paradigm, and use multiple sample integrations into existing applications to show how easy it is to adopt, and what can be gained from doing so.","accessible_pdf":false,"authors":[{"affiliations":["NVIDIA, Salt Lake City, United States"],"email":"ingowald@gmail.com","is_corresponding":true,"name":"Ingo Wald"},{"affiliations":["University of Cologne, Cologne, Germany"],"email":"zellmann@uni-koeln.de","is_corresponding":false,"name":"Stefan Zellmann"},{"affiliations":["NVIDIA, Austin, United States"],"email":"jeffamstutz@gmail.com","is_corresponding":false,"name":"Jefferson Amstutz"},{"affiliations":["University of California, Davis, Davis, United States"],"email":"qadwu@ucdavis.edu","is_corresponding":false,"name":"Qi Wu"},{"affiliations":["NVIDIA, Santa Clara, United States"],"email":"kgriffin@nvidia.com","is_corresponding":false,"name":"Kevin Shawn Griffin"},{"affiliations":["VSB - Technical University of Ostrava, Ostrava, Czech Republic"],"email":"milan.jaros@vsb.cz","is_corresponding":false,"name":"Milan Jaro\u0161"},{"affiliations":["University of Cologne, Cologne, Germany"],"email":"wesner@uni-koeln.de","is_corresponding":false,"name":"Stefan Wesner"}],"award":"","doi":"","event_id":"a-ldav","event_title":"LDAV: 13th IEEE Symposium on Large Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"a-ldav-1016","image_caption":"Several examples of large sci-vis data being rendered using the data-parallel ANARI paradigm proposed in this paper. From left to right: a) Roughly one billion color-mapped spheres, rendered using HayStack and BANARI. b) The roughly 500GB DNS data set, with volume path tracing on 128 GPUs, also using HayStack and BANARI. 
c) An iso-surface rendered during an in-situ Ascent session, while attached to an S3D simulation. d) ParaView performing data-parallel rendering on the airplane data set, using our data-parallel ANARI integration in pvserver. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated4","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization","session_uid":"a-ldav","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization"],"time_stamp":"2024-10-13T16:00:00Z","title":"Standardized Data-Parallel Rendering Using ANARI","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-ldav-1018","abstract":"Functional approximation as a high-order continuous representation provides a more accurate value and gradient query compared to the traditional discrete volume representation. Volume visualization directly rendered from functional approximation generates high-quality rendering results without high-order artifacts caused by trilinear interpolations. However, querying an encoded functional approximation is computationally expensive, especially when the input dataset is large, making functional approximation impractical for interactive visualization. In this paper, we proposed a novel functional approximation multi-resolution representation, Adaptive-FAM, which is lightweight and fast to query. We also design a GPU-accelerated out-of-core multi-resolution volume visualization framework that directly utilizes the Adaptive-FAM representation to generate high-quality rendering with interactive responsiveness. Our method can not only dramatically decrease the caching time, one of the main contributors to input latency, but also effectively improve the cache hit rate through prefetching. Our approach significantly outperforms the traditional function approximation method in terms of input latency while maintaining comparable rendering quality.","accessible_pdf":true,"authors":[{"affiliations":["University of Nebraska-Lincoln, Lincoln, United States"],"email":"jianxin.sun@huskers.unl.edu","is_corresponding":true,"name":"Jianxin Sun"},{"affiliations":["Argonne National Laboratory, Lemont, United States"],"email":"dlenz@anl.gov","is_corresponding":false,"name":"David Lenz"},{"affiliations":["University of Nebraska-Lincoln, Lincoln, United States"],"email":"yu@cse.unl.edu","is_corresponding":false,"name":"Hongfeng Yu"},{"affiliations":["Argonne National Laboratory, Lemont, United States"],"email":"tpeterka@mcs.anl.gov","is_corresponding":false,"name":"Tom Peterka"}],"award":"","doi":"","event_id":"a-ldav","event_title":"LDAV: 13th IEEE Symposium on Large Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"a-ldav-1018","image_caption":"Adaptive-FAM is a novel functional approximation multi-resolution representation that is lightweight and fast to query. 
A GPU-accelerated out-of-core multi-resolution volume visualization framework is designed to directly utilize the Adaptive-FAM representation to generate high-quality rendering with interactive responsiveness. Our method can not only dramatically decrease the caching time, one of the main contributors to input latency, but also effectively improve the cache hit rate through prefetching. Our approach significantly outperforms the traditional function approximation method in terms of input latency while maintaining comparable rendering quality. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"https://arxiv.org/pdf/2409.00184","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/a-ldav/a-ldav-1018/a-ldav-1018_Preview.mp4?token=GL1VSSAmjgz3CWILLa9K7nXfw-Zi9CzBXtYPWYBHkJU&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated4","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization","session_uid":"a-ldav","session_youtube_ff_id":"XCfEmhA78EI","session_youtube_ff_link":"https://youtu.be/XCfEmhA78EI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization"],"time_stamp":"2024-10-13T16:00:00Z","title":"Adaptive Multi-Resolution Encoding for Interactive Large-Scale Volume Visualization through Functional Approximation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"s-vds-1000","abstract":"Efficient public transport systems are crucial for sustainable urban development as cities face increasing mobility demands. Yet, many public transport networks struggle to meet diverse user needs due to historical development, urban constraints, and financial limitations. Traditionally, planning of transport network structure is often based on limited surveys, expert opinions, or partial usage statistics. This provides an incomplete basis for decision-making. We introduce an data-driven approach to public transport planning and optimization, calculating detailed accessibility measures at the individual housing level. Our visual analytics workflow combines population-group-based simulations with dynamic infrastructure analysis, utilizing a scenario-based model to simulate daily travel patterns of varied demographic groups, including schoolchildren, students, workers, and pensioners. These population groups, each with unique mobility requirements and routines, interact with the transport system under different scenarios traveling to and from Points of Interest (POI), assessed through travel time calculations. Results are visualized through heatmaps, density maps, and network overlays, as well as detailed statistics. Our system allows us to analyze both the underlying data and simulation results on multiple levels of granularity, delivering both broad insights and granular details. Case studies with the city of Konstanz, Germany reveal key areas where public transport does not meet specific needs, confirmed through a formative user study. 
Due to the high cost of changing legacy networks, our analysis facilitates the identification of strategic enhancements, such as optimized schedules or rerouting, and few targeted stop relocations, highlighting consequential variations in accessibility to pinpointing critical service gaps. Our research advances urban transport analytics by providing policymakers and citizens with a system that delivers both broad insights with granular detail into public transport services for a data-driven quality assessment at housing-level detail.","accessible_pdf":true,"authors":[{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"yannick.metz@uni-konstanz.de","is_corresponding":false,"name":"Yannick Metz"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"dennis-fabian.ackermann@uni-konstanz.de","is_corresponding":false,"name":"Dennis Ackermann"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"keim@uni-konstanz.de","is_corresponding":false,"name":"Daniel Keim"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"max.fischer@uni-konstanz.de","is_corresponding":true,"name":"Maximilian T. Fischer"}],"award":"","doi":"","event_id":"s-vds","event_title":"VDS: Visualization in Data Science Symposium","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"s-vds-1000","image_caption":"Advancing urban transport infrastructure analysis through an interactive simulation framework of Mobility Profiles. This method integrates multi-source open data sources and integrates them with network flow simulations, encapsulated within an enriched map visualization to assess the quality - i.e. connectedness and travel times - of public transport at housing-level detail. Users can dynamically alter and explore mobility scenarios for various demographics, control the analysis through several components, and enhance the results with contextual background and network information. This enables interactive, systematic comparisons against diverse operational assumptions.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"https://arxiv.org/abs/2407.10791","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VDS: Visualization in Data Science Symposium","session_uid":"s-vds","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VDS: Visualization in Data Science Symposium"],"time_stamp":"2024-10-13T16:55:00Z","title":"Interactive Public Transport Infrastructure Analysis through Mobility Profiles: Making the Mobility Transition Transparent","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"s-vds-1002","abstract":"This position paper explores the interplay between automation and human involvement in data science. It synthesizes perspectives from Automated Data Science (AutoDS) and Interactive Data Visualization (VIS), which traditionally represent opposing ends of the human-machine spectrum. 
While AutoDS aims to enhance efficiency by reducing human tasks, VIS emphasizes the importance of nuanced understanding, innovation, and context provided by human involvement. This paper examines these dichotomies through an online survey and advocates for a balanced approach that harmonizes the efficiency of automation with the irreplaceable insights of human expertise. Ultimately, we address the essential question of not just what we can automate, but what we should automate, seeking strategies that prioritize technological advancement alongside the fundamental need for human oversight.","accessible_pdf":false,"authors":[{"affiliations":["Tufts University, Boston, United States"],"email":"jen@cs.tufts.edu","is_corresponding":true,"name":"Jen Rogers"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, INRIA, Orsay, France"],"email":"mehdi.chakhchoukh@universite-paris-saclay.fr","is_corresponding":false,"name":"Mehdi Chakhchoukh"},{"affiliations":["Leiden Universiteit, Leiden, Netherlands"],"email":"anastacio@aim.rwth-aachen.de","is_corresponding":false,"name":"Marie Anastacio"},{"affiliations":["Tulane University, New Orleans, United States"],"email":"rfaust1@tulane.edu","is_corresponding":false,"name":"Rebecca Faust"},{"affiliations":["University of Warwick, Coventry, United Kingdom"],"email":"cagatay.turkay@warwick.ac.uk","is_corresponding":false,"name":"Cagatay Turkay"},{"affiliations":["University of Wyoming, Laramie, United States"],"email":"larsko@uwyo.edu","is_corresponding":false,"name":"Lars Kotthoff"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"steffen.koch@vis.uni-stuttgart.de","is_corresponding":false,"name":"Steffen Koch"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"andreas.kerren@liu.se","is_corresponding":false,"name":"Andreas Kerren"},{"affiliations":["University of Zurich, Zurich, Switzerland"],"email":"bernard@ifi.uzh.ch","is_corresponding":false,"name":"J\u00fcrgen Bernard"}],"award":"","doi":"","event_id":"s-vds","event_title":"VDS: Visualization in Data Science Symposium","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"s-vds-1002","image_caption":"The tug-of-war between automation and human involvement in data science: As automation technology advances, the balance between human intuition and machine efficiency becomes increasingly critical. Accessibility Description: An illustration of a tug-of-war between a robot on one side and three human figures on the other. The robot, representing automation, pulls one end of a rope while the human figures, symbolizing human involvement, pull from the opposite side. 
The image conveys the tension between automated processes and human input in data science.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/s-vds/s-vds-1002/s-vds-1002_Preview.mp4?token=95WeM8irbKnh9Du-lE9XJTq36pdxWIujIGgIGk3Z1n4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/s-vds/s-vds-1002/s-vds-1002_Preview.srt?token=8fssMCCBdK9YKDz1Yu5pDc-mAEXHleiX_h-nOSWcRrc&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VDS: Visualization in Data Science Symposium","session_uid":"s-vds","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VDS: Visualization in Data Science Symposium"],"time_stamp":"2024-10-13T16:45:00Z","title":"Visualization and Automation in Data Science: Exploring the Paradox of Humans-in-the-Loop","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"s-vds-1007","abstract":"Categorical data does not have an intrinsic definition of distance or order, and therefore, established visualization techniques for categorical data only allow for a set-based or frequency-based analysis, e.g., through Euler diagrams or Parallel Sets, and do not support a similarity-based analysis. We present a novel dimensionality reduction-based visualization for categorical data, which is based on defining the distance of two data items as the number of varying attributes. Our technique enables users to pre-attentively detect groups of similar data items and observe the properties of the projection, such as attributes strongly influencing the embedding. Our prototype visually encodes data properties in an enhanced scatterplot-like visualization, visualizing attributes in the background to show the distribution of categories. In addition, we propose two graph-based measures to quantify the plot's visual quality, which rank attributes according to their contribution to cluster cohesion. To demonstrate the capabilities of our similarity-based projection method, we compare it to Euler diagrams and Parallel Sets regarding visual scalability and evaluate it quantitatively on seven real-world datasets using a range of common quality measures. Further, we validate the benefits of our approach through an expert study with five data scientists analyzing the Titanic and Mushroom dataset with up to 23 attributes and 8124 category combinations. Our results indicate that our Categorical Data Map offers an effective analysis method for large datasets with a high number of category combinations.","accessible_pdf":false,"authors":[{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"frederik.dennig@uni-konstanz.de","is_corresponding":true,"name":"Frederik L. 
Dennig"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"lucas.joos@uni-konstanz.de","is_corresponding":false,"name":"Lucas Joos"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"patrick.paetzold@uni-konstanz.de","is_corresponding":false,"name":"Patrick Paetzold"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"blumbergdaniela@gmail.com","is_corresponding":false,"name":"Daniela Blumberg"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"oliver.deussen@uni-konstanz.de","is_corresponding":false,"name":"Oliver Deussen"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"keim@uni-konstanz.de","is_corresponding":false,"name":"Daniel Keim"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"max.fischer@uni-konstanz.de","is_corresponding":false,"name":"Maximilian T. Fischer"}],"award":"","doi":"","event_id":"s-vds","event_title":"VDS: Visualization in Data Science Symposium","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"s-vds-1007","image_caption":"The Categorical Data Map enables projection-based analysis of categorical data, here exemplified by the Property Sales dataset with MDS using the Jaccard coefficient: (1) shows 10 groups without layout enrichment. (2) shows a clear separation between Private Property vs Public Property. (3) indicates boundaries and symmetries for the Location of Purchased Property attribute, while in (4), the Property Type Purchased contributes the least to the clusters. The glyph sizes encode the subset sizes, revealing that categories Private Property and Central often occur together.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"https://arxiv.org/abs/2404.16044","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VDS: Visualization in Data Science Symposium","session_uid":"s-vds","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VDS: Visualization in Data Science Symposium"],"time_stamp":"2024-10-13T17:45:00Z","title":"The Categorical Data Map: A Multidimensional Scaling-Based Approach","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"s-vds-1013","abstract":"Clustering is an essential technique across various domains, such as data science, machine learning, and eXplainable Artificial Intelligence. Information visualization and visual analytics techniques have been proven to effectively support human involvement in the visual exploration of clustered data to enhance the understanding and refinement of cluster assignments. This paper presents an attempt at a deep and exhaustive evaluation of the perceptive aspects of clustering quality metrics, focusing on the Davies-Bouldin Index, Dunn Index, Calinski-Harabasz Index, and Silhouette Score. Our research is centered around two main objectives: a) assessing the human perception of common CVIs in 2D scatterplots and b) exploring the potential of Large Language Models (LLMs), in particular GPT-4o, to emulate the assessed human perception. 
By discussing the obtained results, highlighting limitations, and areas for further exploration, this paper aims to propose a foundation for future research activities.","accessible_pdf":false,"authors":[{"affiliations":["Sapienza University of Rome, Rome, Italy"],"email":"blasilli@diag.uniroma1.it","is_corresponding":true,"name":"Graziano Blasilli"},{"affiliations":["Northeastern University, Boston, United States"],"email":"kerrigan.d@northeastern.edu","is_corresponding":false,"name":"Daniel Kerrigan"},{"affiliations":["Northeastern University, Boston, United States"],"email":"e.bertini@northeastern.edu","is_corresponding":false,"name":"Enrico Bertini"},{"affiliations":["Sapienza University of Rome, Rome, Italy"],"email":"santucci@diag.uniroma1.it","is_corresponding":false,"name":"Giuseppe Santucci"}],"award":"","doi":"","event_id":"s-vds","event_title":"VDS: Visualization in Data Science Symposium","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"s-vds-1013","image_caption":"This paper presents the first attempt of a deep and exhaustive evaluation of the perceptive aspects of clustering quality metrics, focusing on the Davies-Bouldin Index, Dunn Index, Calinski-Harabasz Index, and Silhouette Score. Our research is centered around two main objectives: a) assessing the human perception of the metrics in 2D scatterplots and b) exploring the potential of Large Multimodal Models, in particular GPT-4o, to emulate the assessed human perception.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VDS: Visualization in Data Science Symposium","session_uid":"s-vds","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VDS: Visualization in Data Science Symposium"],"time_stamp":"2024-10-13T17:05:00Z","title":"Towards a Visual Perception-Based Analysis of Clustering Quality Metrics","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"s-vds-1021","abstract":"Recommender systems have become integral to digital experiences, shaping user interactions and preferences across various platforms. Despite their widespread use, these systems often suffer from algorithmic biases that can lead to unfair and unsatisfactory user experiences. This study introduces an interactive tool designed to help users comprehend and explore the impacts of algorithmic harms in recommender systems. By leveraging visualizations, counterfactual explanations, and interactive modules, the tool allows users to investigate how biases such as miscalibration, stereotypes, and filter bubbles affect their recommendations. Informed by in-depth user interviews, both general users and researchers can benefit from increased transparency and personalized impact assessments, ultimately fostering a better understanding of algorithmic biases and contributing to more equitable recommendation outcomes. 
This work provides valuable insights for future research and practical applications in mitigating bias and enhancing fairness in machine learning algorithms.","accessible_pdf":false,"authors":[{"affiliations":["University of Pittsburgh, Pittsburgh, United States"],"email":"yongsu.ahn@pitt.edu","is_corresponding":true,"name":"Yongsu Ahn"},{"affiliations":["School of Computing and Information, University of Pittsburgh, Pittsburgh, United States"],"email":"quinnkwolter@gmail.com","is_corresponding":false,"name":"Quinn K Wolter"},{"affiliations":["Quest Diagnostics, Pittsburgh, United States"],"email":"jonilyndick@gmail.com","is_corresponding":false,"name":"Jonilyn Dick"},{"affiliations":["Quest Diagnostics, Pittsburgh, United States"],"email":"janetad99@gmail.com","is_corresponding":false,"name":"Janet Dick"},{"affiliations":["University of Pittsburgh, Pittsburgh, United States"],"email":"yurulin@pitt.edu","is_corresponding":false,"name":"Yu-Ru Lin"}],"award":"","doi":"","event_id":"s-vds","event_title":"VDS: Visualization in Data Science Symposium","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"s-vds-1021","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/s-vds/s-vds-1021/s-vds-1021_Preview.mp4?token=Gd9gotKCklMf66hdSrwI7pFZqqLfFnl_iLa5KU-lz50&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/s-vds/s-vds-1021/s-vds-1021_Preview.srt?token=XCnK0MnML8SnYP-fJnKGuB59POVVoqT0E1qUtcyFcQA&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VDS: Visualization in Data Science Symposium","session_uid":"s-vds","session_youtube_ff_id":"Q4PRivOX2CQ","session_youtube_ff_link":"https://youtu.be/Q4PRivOX2CQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VDS: Visualization in Data Science Symposium"],"time_stamp":"2024-10-13T17:55:00Z","title":"Interactive Counterfactual Exploration of Algorithmic Harms in Recommender Systems","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"s-vds-1029","abstract":"This position paper discusses the profound impact of Large Language Models (LLMs) on semantic change, emphasizing the need for comprehensive monitoring and visualization techniques. Building on established concepts from linguistics, we examine the interdependency between mental and language models, discussing how LLMs influence and are influenced by human cognition and societal context. We introduce three primary theories to conceptualize such influences: Recontextualization, Standardization, and Semantic Dementia, illustrating how LLMs drive, standardize, and potentially degrade language semantics.Our subsequent review categorizes methods for visualizing semantic change into frequency-based, embedding-based, and context-based techniques, being first in assessing their effectiveness in capturing linguistic evolution: Embedding-based methods are highlighted as crucial for a detailed semantic analysis, reflecting both broad trends and specific linguistic changes. 
We underscore the need for novel visual, interactive tools to monitor and explain semantic changes induced by LLMs, ensuring the preservation of linguistic diversity and mitigating linguistic biases. This work provides essential insights for future research on semantic change visualization and the dynamic nature of language evolution in the times of LLMs.","accessible_pdf":false,"authors":[{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"raphael.buchmueller@uni-konstanz.de","is_corresponding":true,"name":"Raphael Buchm\u00fcller"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"friederike.koerte@uni-konstanz.de","is_corresponding":false,"name":"Friederike K\u00f6rte"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"keim@uni-konstanz.de","is_corresponding":false,"name":"Daniel Keim"}],"award":"","doi":"","event_id":"s-vds","event_title":"VDS: Visualization in Data Science Symposium","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"s-vds-1029","image_caption":"Hi, and thanks for joining. In a nutshell, our research looks at how Large Language Models are reshaping the conceptual framework of our language. While language change has traditionally been driven by socio-linguistic factors like metaphorization, we introduce three new ideas: recontextualization, standardization, and what we call semantic dementia. Using visual analytics, we can track these shifts to preserve linguistic diversity and reduce bias. We review key methods, like embedding-based techniques, to detect and explain these changes. In the end, we call for new visualization tools to better understand how LLMs are impacting our language. Thanks for watching.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/s-vds/s-vds-1029/s-vds-1029_Preview.mp4?token=t9ktK8xhtnb0aavzIMa18Y4uVFntG62g5EOSZEZ1gfE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/s-vds/s-vds-1029/s-vds-1029_Preview.srt?token=PAuO0koDnAkjI8Xm8CFJWMVlQws48T_83jbrIiX_mHU&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VDS: Visualization in Data Science Symposium","session_uid":"s-vds","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VDS: Visualization in Data Science Symposium"],"time_stamp":"2024-10-13T18:05:00Z","title":"Seeing the Shift: Keep an Eye on Semantic Changes in Times of LLMs","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1008","abstract":"Stress is among the most commonly employed quality metrics and optimization criteria for dimension reduction projections of high-dimensional data. Complex, high-dimensional data is ubiquitous across many scientific disciplines, including machine learning, biology, and the social sciences. One of the primary methods of visualizing these datasets is with two-dimensional scatter plots that visually capture some properties of the data. 
Because visually determining the accuracy of these plots is challenging, researchers often use quality metrics to measure the projection\u2019s accuracy or faithfulness to the full data. One of the most commonly employed metrics, normalized stress, is sensitive to uniform scaling (stretching, shrinking) of the projection, despite this act not meaningfully changing anything about the projection. We investigate the effect of scaling on stress and other distance-based quality metrics analytically and empirically by showing just how much the values change and how this affects dimension reduction technique evaluations. We introduce a simple technique to make normalized stress scale-invariant and show that it accurately captures expected behavior on a small benchmark.","accessible_pdf":true,"authors":[{"affiliations":["University of Arizona, Tucson, United States"],"email":"ksmelser@arizona.edu","is_corresponding":false,"name":"Kiran Smelser"},{"affiliations":["University of Arizona, Tucson, United States"],"email":"jacobmiller1@arizona.edu","is_corresponding":true,"name":"Jacob Miller"},{"affiliations":["University of Arizona, Tucson, United States"],"email":"stephen.kobourov@tum.de","is_corresponding":false,"name":"Stephen Kobourov"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1008","image_caption":"MDS, t-SNE, and RND (random) embeddings of the well-known Iris dataset from left to right (bottom). The plot (top) shows the values of the normalized stress metric for these three embeddings and clearly illustrates the sensitivity to scale. As one uniformly scales the embeddings to be larger or smaller, the value of normalized stress changes. Notably, at different scales, different embeddings have lower stress, including the absurd situation where the random embedding has the lowest stress (beyond scale 9). Moreover, the expected order of MDS, t-SNE, RND is only found briefly at a scalar value slightly greater than 0.25 (hardly visible in the plot), and all six different algorithm orders can be found by selecting different scales. 
","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2408.07724","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-beliv/w-beliv-1008/w-beliv-1008_Preview.mp4?token=Um2oJjnT2y3HO9IljRJKeSSUa93W3n11qrkN3dI3dAs&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-beliv/w-beliv-1008/w-beliv-1008_Preview.srt?token=okgeJ3fLQmb9x7DcRRyrZCL_4cYRm7_qRC2KHtcoOXY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6a","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)","session_uid":"w-beliv","session_youtube_ff_id":"p1bNgrfXToY","session_youtube_ff_link":"https://youtu.be/p1bNgrfXToY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KUbbe0PguKY&t=1h58m57s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)"],"time_stamp":"2024-10-14T12:30:00Z","title":"Normalized Stress is Not Normalized: How to Interpret Stress Correctly","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1015","abstract":"Empirical studies in visualisation often compare visual representations to identify the most effective visualisation for a particular visual judgement or decision making task. However, the effectiveness of a visualisation may be intrinsically related to, and difficult to distinguish from, factors such as visualisation literacy. Complicating matters further, visualisation literacy itself is not a singular intrinsic quality, but can be a result of several distinct challenges that a viewer encounters when performing a task with a visualisation. In this paper, we describe how such challenges apply to experiments that we use to evaluate visualisations, and discuss a set of considerations for designing studies in the future. Finally, we argue that aspects of the study design which are often neglected or overlooked (such as the onboarding of participants, tutorials, training etc.) can have a big role in the results of a study and can potentially impact the conclusions that the researchers can draw from the study.","accessible_pdf":false,"authors":[{"affiliations":["Northwestern University, Evanston, United States"],"email":"abhraneel@u.northwestern.edu","is_corresponding":true,"name":"Abhraneel Sarma"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"shenglong@u.northwestern.edu","is_corresponding":false,"name":"Sheng Long"},{"affiliations":["Northeastern University, Portland, United States"],"email":"m.correll@northeastern.edu","is_corresponding":false,"name":"Michael Correll"},{"affiliations":["Northwestern University, Chicago, United States"],"email":"matthew.kay@gmail.com","is_corresponding":false,"name":"Matthew Kay"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1015","image_caption":"The 'Telephone' framework describes two possible pathways of participants\u2019 behaviour in experiments. 
In the desired pathway, a user performs the experimental task using the optimal strategy, allowing the researcher to estimate a measure of visualisation effectiveness. However, this desired pathway may not always manifest in practice. What an experiment instead might be measuring is described through the alternative pathway\u2014a user performs what they think the task is, using a strategy which they think best supports this perceived task; the experiment is actually measuring how well the visualisation supports a user in performing their perceived task using their perceived optimal strategy.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://osf.io/preprints/osf/d849a","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6a","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KUbbe0PguKY&t=1h47m58s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)"],"time_stamp":"2024-10-14T12:30:00Z","title":"Tasks and Telephone: Understanding Barriers to Inference due to Issues in Experiment Design","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1016","abstract":"This position paper critically examines the graphical inference framework for evaluating visualizations using the lineup task. We present a re-analysis of lineup task data using signal detection theory, applying four Bayesian non-linear models to investigate whether color ramps with more color name variation increase false discoveries. Our study utilizes data from Reda and Szafir\u2019s previous work [20], corroborating their findings while providing additional insights into sensitivity and bias differences across colormaps and individuals. We suggest improvements to lineup study designs and explore the connections between graphical inference, signal detection theory, and statistical decision theory. Our work contributes a more perceptually grounded approach for assessing visualization effectiveness and offers a path forward for better aligning graphical inference methods with human cognition. The results have implications for the development and evaluation of visualizations, particularly for exploratory data analysis scenarios. Supplementary materials are available at https://osf.io/xd5cj/.","accessible_pdf":false,"authors":[{"affiliations":["Northwestern University, Evanston, United States"],"email":"shenglong@u.northwestern.edu","is_corresponding":true,"name":"Sheng Long"},{"affiliations":["Northwestern University, Chicago, United States"],"email":"matthew.kay@gmail.com","is_corresponding":false,"name":"Matthew Kay"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1016","image_caption":"The image connects a visual lineup task with signal detection theory. 
It shows a lineup of multivariate images where participants identify if one differs or if there's \"no discernible difference.\" Signal detection theory analyzes this data, assuming perceptual evidence for signal presence/absence as overlapping probability distributions. This quantifies observer sensitivity and decision criterion, separating perceptual sensitivity from response bias. The graphs illustrate concepts like false alarm rate, hit rate, and sensitivity (d'), demonstrating how the theory applies to perceptual decision-making in visual discrimination tasks.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://osf.io/preprints/osf/ghru8","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6a","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KUbbe0PguKY&t=1h36m44s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)"],"time_stamp":"2024-10-14T12:30:00Z","title":"Old Wine in a New Bottle? Analysis of Visual Lineups with Signal Detection Theory","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1018","abstract":"Visualising personal experiences is often described as a means for self-reflection, shaping one\u2019s identity, and sharing it with others. In policymaking, personal narratives are regarded as an important source of intelligence to shape public discourse andpolicy. Therefore, policymakers are interested in the interplay between individual-level experiences and macro-political processes that play into shaping these experiences. In this context, visualisation is regarded as a medium for advocacy, creating a power balance between individuals and the power structures that influence their health and well-being. In this paper, we offer a politically-framed reflection on how visualisation creators define lived experience data, and what design choices they make for visualising them. We identify data characteristics and design choices that enable visualisation authors and consumers to engage in a process of narrative co-construction, while navigating structural forms of inequality. 
Our political framing is driven by ideas of master and alternative narratives from Diversity Science, in which authors and narrators engage in a process of negotiation with power structures to either maintain or challenge the status quo.","accessible_pdf":true,"authors":[{"affiliations":["City, University of London, London, United Kingdom"],"email":"mai.elshehaly@city.ac.uk","is_corresponding":true,"name":"Mai Elshehaly"},{"affiliations":["City, University of London, London, United Kingdom"],"email":"mirela.reljan-delaney@city.ac.uk","is_corresponding":false,"name":"Mirela Reljan-Delaney"},{"affiliations":["City, University of London, London, United Kingdom"],"email":"j.dykes@city.ac.uk","is_corresponding":false,"name":"Jason Dykes"},{"affiliations":["City, University of London, London, United Kingdom"],"email":"a.slingsby@city.ac.uk","is_corresponding":false,"name":"Aidan Slingsby"},{"affiliations":["City, University of London, London, United Kingdom"],"email":"j.d.wood@city.ac.uk","is_corresponding":false,"name":"Jo Wood"},{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"sam.spiegel@ed.ac.uk","is_corresponding":false,"name":"Sam Spiegel"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1018","image_caption":"The Master Narrative Framework for Visualization, which may be useful for exposing Master narratives, developing Alternative narratives and establishing Personal narratives in visualization design, critique and education. Adapted from Syed and McLean [42]. We argue that the contrast between master, alternative, and personal narratives can better define the role of visualisation in advocacy and shaping policy. We use Wee People in this figure, a typeface of people silhouettes https://github.com/propublica/weepeople .","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://invisai.com/mai/papers/Visualising_Lived_Experience-beliv2024.pdf","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6a","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KUbbe0PguKY&t=1h7m52s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)"],"time_stamp":"2024-10-14T12:30:00Z","title":"Visualising Lived Experience: Learning from a Master and Alternative Narrative Framing","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1020","abstract":"The generation and presentation of counterfactual explanations (CFEs) are a commonly used, model-agnostic approach to helping end-users reason about the validity of AI/ML model outputs. 
By demonstrating how sensitive the model's outputs are to minor variations, CFEs are thought to improve understanding of the model's behavior, identify potential biases, and increase the transparency of 'black box models'. Here, we examine how CFEs support a diverse audience, both with and without technical expertise, to understand the results of an LLM-informed sentiment analysis. We conducted a preliminary pilot study with ten individuals with varied expertise, ranging from NLP, ML, and ethics, to specific domains. All individuals were actively using or working with AI/ML technology as part of their daily jobs. Through semi-structured interviews grounded in a set of concrete examples, we examined how CFEs influence participants' perceptions of the model's correctness, fairness, and trustworthiness, and how visualization of CFEs specifically influences those perceptions. We also surface how participants wrestle with their internal definitions of `explainability', relative to what CFEs present, their cultures, and backgrounds, in addition to the much more widely studied phenomenon of comparing their baseline expectations of the model's performance. Compared to prior research, our findings highlight the sociotechnical frictions that CFEs surface but do not necessarily remedy. We conclude with the design implications of developing transparent AI/ML visualization systems for more general tasks.","accessible_pdf":false,"authors":[{"affiliations":["Tableau Research, Seattle, United States"],"email":"amcrisan@uwaterloo.ca","is_corresponding":true,"name":"Anamaria Crisan"},{"affiliations":["Tableau Software, Seattle, United States"],"email":"nbutters@salesforce.com","is_corresponding":false,"name":"Nathan Butters"},{"affiliations":["Tableau Software, Seattle, United States"],"email":"zoezoezoe.cc@gmail.com","is_corresponding":false,"name":"Zoe Zoe"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-beliv-1020","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6a","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KUbbe0PguKY&t=1h20m52s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)"],"time_stamp":"2024-10-14T12:30:00Z","title":"Exploring Subjective Notions of Explainability through Counterfactual Visualization of Sentiment Analysis","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1037","abstract":"Qualitative data analysis is widely adopted for user evaluation, not only in the Visualisation community but also related communities, such as Human-Computer Interaction and Augmented and Virtual Reality. 
However, the data analysis process is often not clearly described and the results are often simply listed in the form of interesting quotes from, or summaries of quotes uttered by, study participants. This position paper proposes an early concept for the use of a researcher as an \u201cAdvocatus Diaboli\u201d, or devil\u2019s advocate, to try to disprove the results of the data analysis by looking for quotes that contradict the findings, or for leading questions and task designs. Whatever this devil\u2019s advocate finds can then be used to iterate on the findings and the analysis process to form more suitable theories. On the other hand, researchers are enabled to clarify why they did not include this in their theory. This process could increase transparency in the qualitative data analysis process and increase trust in these findings, while being mindful of the necessary resources.","accessible_pdf":false,"authors":[{"affiliations":["University of Applied Sciences Upper Austria, Hagenberg, Austria"],"email":"judith.friedl-knirsch@fh-hagenberg.at","is_corresponding":true,"name":"Judith Friedl-Knirsch"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1037","image_caption":"A sketch of the Advocatus Diaboli process for qualitative data analysis. First, the primary researcher analyses the collected data. Then a secondary researcher assumes the position of an Advocatus Diaboli and attempts to disprove the findings of the primary researcher based on the collected data. Finally, both researchers discuss the findings of the Advocatus Diaboli and adapt the results if necessary.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6a","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KUbbe0PguKY&t=0h57m3s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)"],"time_stamp":"2024-10-14T12:30:00Z","title":"Position paper: Proposing the use of an \u201cAdvocatus Diaboli\u201d as a pragmatic approach to improve transparency in qualitative data analysis and reporting","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1001","abstract":"I analyze the evolution of papers certified by the Graphics Replicability Stamp Initiative (GRSI) to be reproducible, with a specific focus on the subset of publications that address visualization-related topics. With this analysis I show that, while the number of papers is increasing overall and within the visualization field, we still have to improve quite a bit to escape the replication crisis. 
I base my analysis on the data published by the GRSI as well as publication data for the different venues in visualization and lists of journal papers that have been presented at visualization-focused conferences. I also analyze the differences between the involved journals as well as the percentage of reproducible papers in the different presentation venues. Furthermore, I look at the authors of the publications and, in particular, their affiliation countries to see where most reproducible papers come from. Finally, I discuss potential reasons for the low reproducibility numbers and suggest possible ways to overcome these obstacles. This paper is reproducible itself, with source code and data available from github.com/tobiasisenberg/Visualization-Reproducibility as well as a free paper copy and all supplemental materials at osf.io/mvnbj.","accessible_pdf":false,"authors":[{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"tobias.isenberg@gmail.com","is_corresponding":true,"name":"Tobias Isenberg"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1001","image_caption":"In my paper I analyze the evolution of reproducible contributions to the graphics and visualization fields as certified by the Graphics Replicability Stamp Initiative. I focus specifically on the visualization field and discuss reasons for the still relatively low counts of reproducible papers.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2408.03889","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-beliv/w-beliv-1001/w-beliv-1001_Preview.mp4?token=v4kN7_ZczcC7CarhJ9s2gX88LaEgPkpOIuGiLWAvREA&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"CJ9FIt62O5o","session_youtube_ff_link":"https://youtu.be/CJ9FIt62O5o","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=1h6m53s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"The State of Reproducibility Stamps for Visualization Research Papers","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1004","abstract":"In the rapidly evolving field of information visualization, rigorous evaluation is essential for validating new techniques, understanding user interactions, and demonstrating the effectiveness of visualizations. The evaluation of visualization systems is fundamental to ensuring their effectiveness, usability, and impact. Faithful evaluations provide valuable insights into how users interact with and perceive the system, enabling designers to make informed decisions about design choices and improvements. 
However, an emerging trend of multiple evaluations within a single study raises critical questions about the sustainability, feasibility, and methodological rigor of such an approach. The question of how many evaluations are enough is situational and cannot be determined formulaically. Our objective is to summarize current trends and patterns to understand general practices across different contribution and evaluation types. New researchers and students, influenced by this trend, may believe that multiple evaluations are necessary for a study. However, the number of evaluations in a study should depend on its contributions and merits, not on the trend of including multiple evaluations to strengthen a paper. In this position paper, we identify this trend through a non-exhaustive literature survey of TVCG papers from issue 1 in 2023 and 2024. We then discuss various evaluation strategy patterns in the information visualization field and how this paper will open avenues for further discussion.","accessible_pdf":true,"authors":[{"affiliations":["University of North Carolina at Chapel Hill, Chapel Hill, United States"],"email":"flin@unc.edu","is_corresponding":false,"name":"Feng Lin"},{"affiliations":["University of North Carolina-Chapel Hill, Chapel Hill, United States"],"email":"zeyuwang@cs.unc.edu","is_corresponding":true,"name":"Arran Zeyu Wang"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"dilshadur@sci.utah.edu","is_corresponding":false,"name":"Md Dilshadur Rahman"},{"affiliations":["University of North Carolina-Chapel Hill, Chapel Hill, United States"],"email":"danielle.szafir@cs.unc.edu","is_corresponding":false,"name":"Danielle Albers Szafir"},{"affiliations":["University of Oklahoma, Norman, United States"],"email":"quadri@ou.edu","is_corresponding":false,"name":"Ghulam Jilani Quadri"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1004","image_caption":"(Left) Distribution of four evaluation methods (quantitative, qualitative, case study, and mixed methods) across 214 papers, showing whether each type was not utilized, used once, or used multiple times within a single study. (Middle) Venn diagram showing the overlap of papers using quantitative, qualitative, and case study evaluations. (Right) Grouped bar chart of the proportion of five paper categories (experimental, survey, system, application, and technique), illustrating the distribution of evaluation methods used in each category. 
Quantitative and case studies are common in technique papers, while experimental papers often use both quantitative and qualitative methods.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://www.arxiv.org/abs/2408.16080","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=2h2m49s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"Striking the Right Balance: Systematic Assessment of Evaluation Method Distribution Across Contribution Types","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1005","abstract":"Various standardized tests exist that assess individuals' visualization literacy. Their use can help to draw conclusions from studies. However, it is not taken into account that the test itself can create a pressure situation where participants might fear being exposed and assessed negatively. This is especially problematic when testing domain experts in design studies. We conducted interviews with experts from different domains performing the Mini-VLAT test for visualization literacy to identify potential problems. Our participants reported that the time limit per question, ambiguities in the questions and visualizations, and missing steps in the test procedure mainly had an impact on their performance and content. We discuss possible changes to the test design to address these issues and how such assessment methods could be integrated into existing evaluation procedures.","accessible_pdf":false,"authors":[{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"seyda.oeney@visus.uni-stuttgart.de","is_corresponding":true,"name":"Seyda \u00d6ney"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"moataz.abdelaal@visus.uni-stuttgart.de","is_corresponding":false,"name":"Moataz Abdelaal"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"kuno.kurzhals@visus.uni-stuttgart.de","is_corresponding":false,"name":"Kuno Kurzhals"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"paul.betz@sowi.uni-stuttgart.de","is_corresponding":false,"name":"Paul Betz"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"cordula.kropp@sowi.uni-stuttgart.de","is_corresponding":false,"name":"Cordula Kropp"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"weiskopf@visus.uni-stuttgart.de","is_corresponding":false,"name":"Daniel Weiskopf"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1005","image_caption":"Domain experts may be asked to take the Mini-VLAT test to assess their visualization skills. 
However, factors such as the time limit on each question could cause stress, potentially affecting their performance.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2409.08101","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=1h23m38s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"Testing the Test: Observations When Assessing Visualization Literacy of Domain Experts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1007","abstract":"In visualization, the process of transforming raw data into visually comprehensible representations is pivotal. While existing models like the Information Visualization Reference Model describe the data-to-visual mapping process, they often overlook a crucial intermediary step: design-specific transformations. This process, occurring after data transformation but before visual-data mapping, further derives data, such as groupings, layout, and statistics, that are essential to properly render the visualization. In this paper, we advocate for a deeper exploration of design-specific transformations, highlighting their importance in understanding visualization properties, particularly in relation to user tasks. We incorporate design-specific transformations into the Information Visualization Reference Model and propose a new formalism that encompasses the user task as a function over data. The resulting formalism offers three key benefits over existing visualization models: (1) describing tasks as compositions of functions, (2) enabling analysis of data transformations for visual-data mapping, and (3) empowering reasoning about visualization correctness and effectiveness. We further discuss the potential implications of this model on visualization theory and visualization experiment design.","accessible_pdf":false,"authors":[{"affiliations":["Columbia University, New York City, United States"],"email":"ewu@cs.columbia.edu","is_corresponding":true,"name":"eugene Wu"},{"affiliations":["Tufts University, Medford, United States"],"email":"remco@cs.tufts.edu","is_corresponding":false,"name":"Remco Chang"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1007","image_caption":"We propose to extend the Infovis Reference Model to explicitly model the role of design-specific data transformations in visualization design. This model decomposes visual mappings into design-specific transformations (e.g., stacking, quantization, calculating statistics) and a visual encoding. We further propose to model tasks as functions over the input data that the user wishes to estimate using the visualization. 
","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=0h25m34s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"Design-Specific Transforms In Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1009","abstract":"The cognitive processes involved in understanding and misunderstanding visualizations have not yet been fully clarified, even for well-studied designs, such as bar charts. In particular, little is known about whether viewers can improve their learning processes by getting better insight into their own cognition. This paper describes a simple method to measure the role of such metacognitive understanding when learning to read bar charts. For this purpose, we conducted an experiment in which we investigated bar chart learning repeatedly, and tested how learning over trials was effected by metacognitive understanding. We integrate the findings into a model of metacognitive processing of visualizations, and discuss implications for the design of visualizations.","accessible_pdf":false,"authors":[{"affiliations":["Heidelberg University, Heidelberg, Germany"],"email":"antonia.schlieder@t-online.de","is_corresponding":true,"name":"Antonia Schlieder"},{"affiliations":["Heidelberg University, Heidelberg, Germany"],"email":"jan.rummel@psychologie.uni-heidelberg.de","is_corresponding":false,"name":"Jan Rummel"},{"affiliations":["Ruprecht-Karls-Universit\u00e4t Heidelberg, Heidelberg, Germany"],"email":"palbers@mathi.uni-heidelberg.de","is_corresponding":false,"name":"Peter Albers"},{"affiliations":["Heidelberg University, Heidelberg, Germany"],"email":"sadlo@uni-heidelberg.de","is_corresponding":false,"name":"Filip Sadlo"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1009","image_caption":"Metacognition is a feature of the cognitive system to monitor and control its cognitive processes. Consequently, one can describe metacognition as the human ability to reflect, to think about thinking, and to adapt our thinking when we deem it necessary. Truncating the y-axis of a bar chart can make the visualization deceptive in terms of certain visual reasoning tasks. 
In an experiment, we show that metacognitive processes are involved in understanding deceptive bar charts, i.e., that reasoners who are able to reflect on and adjust their strategies can improve their performance even without feedback on the correctness of their answers.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://vcg.iwr.uni-heidelberg.de/publications/pubdetails/Schlieder2024metacognition/","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=0h2m48s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"The Role of Metacognition in Understanding Deceptive Bar Charts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1021","abstract":"The replication crisis has spawned a revolution in scientific methods, aimed at increasing the transparency, robustness, and reliability of scientific outcomes. In particular, the practice of preregistering study designs has shown important advantages. Preregistration can help limit questionable research practices, as well as increase the success rate of study replications. Many fields have now adopted preregistration as a default expectation for published studies. In 2022, we set up a panel ``Merits and Limits of User Study Preregistration'' with the overall goal of explaining the concept of preregistration to a wide VIS audience and discussing its suitability for visualization research. 
We report on the arguments and discussion of this panel in the hope that it can benefit the visualization community at large. All materials and a copy of this paper are available on our OSF repository at https://osf.io/wes57/.","accessible_pdf":false,"authors":[{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"lonni.besancon@gmail.com","is_corresponding":true,"name":"Lonni Besan\u00e7on"},{"affiliations":["University of Virginia, Charlottesville, United States"],"email":"nosek@virginia.edu","is_corresponding":false,"name":"Brian Nosek"},{"affiliations":["Tilburg University, Tilburg, Netherlands"],"email":"t.l.haven@tilburguniversity.edu","is_corresponding":false,"name":"Tamarinde Haven"},{"affiliations":["Link\u00f6ping University, N\u00f6rrkoping, Sweden"],"email":"miriah.meyer@liu.se","is_corresponding":false,"name":"Miriah Meyer"},{"affiliations":["Northeastern University, Boston, United States"],"email":"c.dunne@northeastern.edu","is_corresponding":false,"name":"Cody Dunne"},{"affiliations":["Luxembourg Institute of Science and Technology, Belvaux, Luxembourg"],"email":"mohammad.ghoniem@gmail.com","is_corresponding":false,"name":"Mohammad Ghoniem"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1021","image_caption":"In this position paper, we summarize the 2022 panel's discussions and arguments for the wider visualization and human-computer interaction community, point to useful resources, and discuss implications along with any needed community-driven efforts. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://osf.io/preprints/osf/n7ej3","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=0h58m13s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"Merits and Limits of Preregistration for Visualization Research","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1026","abstract":"Despite 30+ years of academic practice, visualization still lacks an explanation of how and why it functions in complex organizations performing knowledge work. This survey examines the intersection of organizational studies and visualization design, highlighting the concept of boundary objects, which visualization practitioners are adopting in both CSCW (computer-supported collaborative work) and HCI. This paper also collects the prior literature on boundary objects in visualization design studies, a methodology which maps closely to action research in organizations, and addresses the same problems of `knowing in common'. 
Process artifacts generated by visualization design studies function as boundary objects in their own right, facilitating knowledge transfer across disciplines within an organization. Currently, visualization faces the challenge of explaining how sense-making functions across domains, through visualization artifacts, and how these support decision-making. As a deeply interdisciplinary field, visualization should adopt the theory of boundary objects in order to embrace its plurality of domains and systems, whilst empowering its practitioners with a unified process-based theory.","accessible_pdf":false,"authors":[{"affiliations":["UC Santa Cruz, Santa Cruz, United States"],"email":"jtotto@ucsc.edu","is_corresponding":false,"name":"Jasmine Tan Otto"},{"affiliations":["California Institute of Technology, Pasadena, United States"],"email":"sd@scottdavidoff.com","is_corresponding":false,"name":"Scott Davidoff"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1026","image_caption":"A `transit network' map of knowledge transfer in complex organizations. Each station represents a stakeholder group. Each line represents a single vertical, pipeline, or other system along which visualization artifacts (and other data products) may flow, acting as vehicles for organizational knowledge. In this example, the Relay, Robotics, and Science Mission groups each include various domain experts and decision-makers; the HCI vertical includes both visualization practitioners (Design and Visualization) and their close-collaborator domain experts (Staffing and Allocation). In this analogy, the task of visualization theory is not just to provide artifacts which serve as `vehicles for knowledge', nor only to identify systems through which knowledge flows, but also to discover processes which explain who shares knowledge, where it needs to go, and why it is (not) getting there. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://osf.io/preprints/osf/9f5ub","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=0h48m49s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"Visualization Artifacts are Boundary Objects","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1027","abstract":"Foundation models for vision and language are the basis of AI applications across numerous sectors of society. The success of these models stems from their ability to mimic human capabilities, namely visual perception in vision models, and analytical reasoning in large language models. 
As visual perception and analysis are fundamental to data visualization, in this position paper we ask: how can we harness foundation models to advance progress in visualization design? Specifically, how can multimodal foundation models (MFMs) guide visualization design through visual perception? We approach these questions by investigating the effectiveness of MFMs for perceiving visualization, and formalizing the overall visualization design and optimization space. Specifically, we think that MFMs can best be viewed as judges, equipped with the ability to criticize visualizations, and provide us with actions on how to improve a visualization. We provide a deeper characterization for text-to-image generative models, and multi-modal large language models, organized by what these models provide as output, and how to utilize the output for guiding design decisions. We hope that our perspective can inspire researchers in visualization on how to approach MFMs for visualization design.","accessible_pdf":false,"authors":[{"affiliations":["Vanderbilt University, Nashville, United States"],"email":"matthew.berger@vanderbilt.edu","is_corresponding":true,"name":"Matthew Berger"},{"affiliations":["Lawrence Livermore National Laboratory , Livermore, United States"],"email":"shusenl@sci.utah.edu","is_corresponding":false,"name":"Shusen Liu"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1027","image_caption":"We characterize the use of multimodal foundation models for guiding visualization design.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=0h14m41s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"[position paper] The Visualization JUDGE : Can Multimodal Foundation Models Guide Visualization Design Through Visual Perception?","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1033","abstract":"Submissions of original research that use Large Language Models (LLMs) or that study their behavior suddenly account for a sizable portion of works submitted and accepted to visualization (VIS) conferences and similar venues in human-computer interaction (HCI). In this brief position paper, I argue that reviewers are relatively unprepared to evaluate these submissions effectively. To support this conjecture, I reflect on my experience serving on four program committees for VIS and HCI conferences over the past year. I will describe common reviewer critiques that I observed and highlight how these critiques influence the review process. 
I also raise some concerns about these critiques that could limit applied LLM research to only the best-resourced labs. While I conclude with suggestions for evaluating research contributions that incorporate LLMs, the ultimate goal of this position paper is to stimulate a discussion on the review process and its challenges.","accessible_pdf":false,"authors":[{"affiliations":["Tableau Research, Seattle, United States"],"email":"amcrisan@uwaterloo.ca","is_corresponding":true,"name":"Anamaria Crisan"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-beliv-1033","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=1h37m33s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"We Don't Know How to Assess LLM Contributions in VIS/HCI","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1034","abstract":"This paper revisits the role of quantitative and qualitative methods in visualization research in the context of advancements in artificial intelligence (AI). The focus is on how we can bridge between the different methods in an integrated process of analyzing user study data. To this end, a process model of - potentially iterated - semantic enrichment of data is proposed. This joint perspective of data and semantics facilitates the integration of quantitative and qualitative methods. The model is motivated by examples of prior work, especially in the area of eye tracking user studies and coding data-rich observations. Finally, there is a discussion of open issues and research opportunities in the interplay between AI and qualitative and quantitative methods for visualization research.","accessible_pdf":false,"authors":[{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"weiskopf@visus.uni-stuttgart.de","is_corresponding":true,"name":"Daniel Weiskopf"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1034","image_caption":"Illustration of the approach that helps bridge quantitative and qualitative methods for visualization research. The schematic process comprises the research question, study design and execution, and iterative analysis of (possibly multimodal) study data. The key part is the analysis loop that keeps on transforming and enriching data with additional semantics to derive new data representations. Through the process, information is obtained at higher and higher levels of understanding. 
The analysis loop may consist of AI-based processing, user intervention, or a combination thereof.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2409.07250","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=1h52m33s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"Bridging Quantitative and Qualitative Methods for Visualization Research: A Data/Semantics Perspective in the Light of Advanced AI","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1035","abstract":"Complexity is often seen as an inherent negative in information design, with the job of the designer being to reduce or eliminate complexity, and with principles like Tufte\u2019s \u201cdata-ink ratio\u201d or \u201cchartjunk\u201d to operationalize minimalism and simplicity in visualizations. However, in this position paper, we call for a more expansive view of complexity as a design material, like color or texture or shape: an element of information design that can be used in many ways, many of which are beneficial to the goals of using data to understand the world around us. We describe complexity as a phenomenon that occurs not just in visual design but in every aspect of the sensemaking process, from data collection to interpretation. For each of these stages, we present examples of ways that these various forms of complexity can be used (or abused) in visualization design. 
We ultimately call on the visualization community to build a more nuanced view of complexity, to look for places to usefully integrate complexity in multiple stages of the design process, and, even when the goal is to reduce complexity, to look for the non-visual forms of complexity that may have otherwise been overlooked.","accessible_pdf":true,"authors":[{"affiliations":["University for Continuing Education Krems, Krems, Austria"],"email":"florian.windhager@donau-uni.ac.at","is_corresponding":false,"name":"Florian Windhager"},{"affiliations":["King's College London, London, United Kingdom"],"email":"alfie.abdulrahman@gmail.com","is_corresponding":false,"name":"Alfie Abdul-Rahman"},{"affiliations":["University of Applied Sciences Potsdam, Potsdam, Germany"],"email":"mark-jan.bludau@fh-potsdam.de","is_corresponding":false,"name":"Mark-Jan Bludau"},{"affiliations":["Warwick Institute for the Science of Cities, Coventry, United Kingdom"],"email":"nicole.hengesbach@posteo.de","is_corresponding":false,"name":"Nicole Hengesbach"},{"affiliations":["University of Amsterdam, Amsterdam, Netherlands"],"email":"h.lamqaddam@uva.nl","is_corresponding":false,"name":"Houda Lamqaddam"},{"affiliations":["OCAD University, Toronto, Canada"],"email":"meirelles.isabel@gmail.com","is_corresponding":false,"name":"Isabel Meirelles"},{"affiliations":["TU Eindhoven, Eindhoven, Netherlands"],"email":"b.speckmann@tue.nl","is_corresponding":false,"name":"Bettina Speckmann"},{"affiliations":["Northeastern University, Portland, United States"],"email":"m.correll@northeastern.edu","is_corresponding":true,"name":"Michael Correll"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1035","image_caption":"Axes of complexity and complexity transformation in visualization design, bridging from project initiation complexity to the complexity of interpretation and communication activities, using the metaphor of a mixing board. A designer might strategically employ higher or lower levels of complexity across these axes to achieve a desired effect. Likewise, changes to one type of complexity shift complexity to other parts of the pipeline.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2409.07465","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=0h38m12s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"Complexity as Design Material","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1090","abstract":"Visualizing high dimensional data is challenging, since any dimensionality reduction technique will distort distances. 
A classic method in cartography\u2013Tissot\u2019s Indicatrix, specific to sphere-to-plane maps\u2013 visualizes distortion using ellipses. Inspired by this idea, we describe the hypertrix: a method for representing distortions that occur when data is projected from arbitrarily high dimensions onto a 2D plane. We demonstrate our technique through synthetic and real-world datasets, and describe how this indicatrix can guide interpretations of nonlinear dimensionality reduction.","accessible_pdf":true,"authors":[{"affiliations":["Harvard University, Boston, United States"],"email":"sraval@g.harvard.edu","is_corresponding":true,"name":"Shivam Raval"},{"affiliations":["Harvard University, Cambridge, United States","Google Research, Cambridge, United States"],"email":"viegas@google.com","is_corresponding":false,"name":"Fernanda Viegas"},{"affiliations":["Harvard University, Cambridge, United States","Google Research, Cambridge, United States"],"email":"wattenberg@gmail.com","is_corresponding":false,"name":"Martin Wattenberg"}],"award":"best","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1090","image_caption":"Hypertrix is an indicatrix for visualizing distortions in high-dimensional data projections. It is an overlay of colored elliptical glyphs on data projections, revealing both the magnitude and direction of local distortions. The hypertrix for a t-SNE projection of the MNIST dataset reveals the compactness of the digit '1' cluster with respect to other clusters.","keywords":["Dimensionality Reduction, High-dimensional data\u2014Distortion\u2014Text Visualization, Clustering"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1090/v-short-1090_Preview.mp4?token=8OKGBT9QpGEiRm869wnviDwZ80_4q4AydJZJrbQVf2k&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1090/v-short-1090_Preview.srt?token=Uaksc-EWIVtQugoqjEhepEOzSd5w8KCNpxZ2dMHiapk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"awards1","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"VGTC Awards & Best Short Papers","session_uid":"v-short","session_youtube_ff_id":"4S9S0DlrE14","session_youtube_ff_link":"https://youtu.be/4S9S0DlrE14","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/WZR6DttAYvo&t=1h2m3s","sessions":["VGTC Awards & Best Short Papers"],"time_stamp":"2024-10-15T15:10:00Z","title":"Hypertrix: An indicatrix for high-dimensional visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1150","abstract":"Exploratory visual data analysis tools empower data analysts to efficiently and intuitively explore data insights throughout the entire analysis cycle. However, the gap between common programmatic analysis (e.g., within computational notebooks) and exploratory visual analysis leads to a disjointed and inefficient data analysis experience. To bridge this gap, we developed PyGWalker, a Python library that offers on-the-fly assistance for exploratory visual data analysis. 
It features a lightweight and intuitive GUI with a shelf builder modality. Its loosely coupled architecture supports multiple computational environments to accommodate varying data sizes. Since its release in February 2023, PyGWalker has gained much attention, with 612k downloads on PyPI and over 10.5k stars on GitHub as of June 2024. This demonstrates its value to the data science and visualization community, with researchers and developers integrating it into their own applications and studies.","accessible_pdf":false,"authors":[{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China","Kanaries Data Inc., Hangzhou, China"],"email":"yue.yu@connect.ust.hk","is_corresponding":true,"name":"Yue Yu"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"lshenaj@connect.ust.hk","is_corresponding":false,"name":"Leixian Shen"},{"affiliations":["Kanaries Data Inc., Hangzhou, China"],"email":"feilong@kanaries.net","is_corresponding":false,"name":"Fei Long"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"huamin@cse.ust.hk","is_corresponding":false,"name":"Huamin Qu"},{"affiliations":["Kanaries Data Inc., Hangzhou, China"],"email":"haochen@kanaries.net","is_corresponding":false,"name":"Hao Chen"}],"award":"best","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1150","image_caption":"The image shows the interface of PyGWalker integrated into a Jupyter Notebook. PyGWalker is invoked with a single line of code, allowing users to seamlessly explore and visualize data using drag-and-drop functionality. Its user-friendly interface supports flexible data transformation and interactive visualization, making it popular among the data science community with over 612k downloads through PyPI and 10.8k stars on GitHub.","keywords":["Data Visualization; Exploratory Data Analysis; Computational Notebooks"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2406.11637","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1150/v-short-1150_Preview.mp4?token=flp8n1Z0vXAqQ_oEIRqfsm2QSpr9E6p9FOk9Al4a2YQ&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1150/v-short-1150_Preview.srt?token=f_y8rHX2MObQ6oElWkMOBTqgjzqz_sECigGOgg1TWf8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"awards1","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"VGTC Awards & Best Short Papers","session_uid":"v-short","session_youtube_ff_id":"snDdcF8cbO4","session_youtube_ff_link":"https://youtu.be/snDdcF8cbO4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/WZR6DttAYvo&t=1h15m57s","sessions":["VGTC Awards & Best Short Papers"],"time_stamp":"2024-10-15T15:21:00Z","title":"PyGWalker: On-the-fly Assistant for Exploratory Visual Data Analysis","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1077","abstract":"A growing body of work draws on feminist thinking to challenge assumptions about how people engage with and use visualizations. 
This work draws on feminist values, driving design and research guidelines that account for the influences of power and neglect. This prior work is largely prescriptive, however, forgoing articulation of how feminist theories of knowledge \u2014 or feminist epistemology \u2014 can alter research design and outcomes. At the core of our work is an engagement with feminist epistemology, drawing attention to how a new framework for how we know what we know enabled us to overcome intellectual tensions in our research. Specifically, we focus on the theoretical concept of entanglement, central to recent feminist scholarship, and contribute: a history of entanglement in the broader scope of feminist theory; an articulation of the main points of entanglement theory for a visualization context; and a case study of research outcomes as evidence of the potential of feminist epistemology to impact visualization research. This work answers a call in the community to embrace a broader set of theoretical and epistemic foundations and provides a starting point for bringing feminist theories into visualization research.","accessible_pdf":true,"authors":[{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"derya.akbaba@liu.se","is_corresponding":true,"name":"Derya Akbaba"},{"affiliations":["Emory University, Atlanta, United States"],"email":"lauren.klein@emory.edu","is_corresponding":false,"name":"Lauren Klein"},{"affiliations":["Link\u00f6ping University, N\u00f6rrkoping, Sweden"],"email":"miriah.meyer@liu.se","is_corresponding":false,"name":"Miriah Meyer"}],"award":"best","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1077","image_caption":"A series of overlapping circles that are made up of four concentric circles. The inner circle is labeled the knowledge artifact, then entanglements with phenomenon, then entanglements with apparatus, then entanglements. 
These concentric circles overlap in a wave of entanglements and cover topics listed as: data, vis, insight, power, conventions, technology, history, processes, materiality, people, society, design, labor, politics, ethics, places.","keywords":["Epistemology, feminism, entanglement, theory"],"open_access_supplemental_link":"https://osf.io/ubrdy/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/rw35g","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1077/v-full-1077_Preview.mp4?token=6NSXfqcdZIqgeto12eTKiolmd1ailFSS5Ylrv9WGhQA&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"awards2","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Best Full Papers","session_uid":"v-full","session_youtube_ff_id":"x-XyV4J73t4","session_youtube_ff_link":"https://youtu.be/x-XyV4J73t4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/d-eG7NRcrKg&t=0h2m15s","sessions":["Best Full Papers"],"time_stamp":"2024-10-15T16:10:00Z","title":"Entanglements for Visualization: Changing Research Outcomes through Feminist Theory","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1232","abstract":"How do cancer cells grow, divide, proliferate, and die? How do drugs influence these processes? These are difficult questions that we can attempt to answer with a combination of time-series microscopy experiments, classification algorithms, and data visualization.However, collecting this type of data and applying algorithms to segment and track cells and construct lineages of proliferation is error-prone; and identifying the errors can be challenging since it often requires cross-checking multiple data types. Similarly, analyzing and communicating the results necessitates synthesizing different data types into a single narrative. State-of-the-art visualization methods for such data use independent line charts, tree diagrams, and images in separate views. However, this spatial separation requires the viewer of these charts to combine the relevant pieces of data in memory. To simplify this challenging task, we describe design principles for weaving cell images, time-series data, and tree data into a cohesive visualization. Our design principles are based on choosing a primary data type that drives the layout and integrates the other data types into that layout. We then introduce Aardvark, a system that uses these principles to implement novel visualization techniques. Based on Aardvark, we demonstrate the utility of each of these approaches for discovery, communication, and data debugging in a series of case studies. 
","accessible_pdf":true,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"devin@sci.utah.edu","is_corresponding":true,"name":"Devin Lange"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"robert.judson-torres@hci.utah.edu","is_corresponding":false,"name":"Robert L Judson-Torres"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"tzangle@chemeng.utah.edu","is_corresponding":false,"name":"Thomas A Zangle"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"alex@sci.utah.edu","is_corresponding":false,"name":"Alexander Lex"}],"award":"best","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1232","image_caption":"Live-cell microscopy imaging results in multimodal data composed of trees, time-series, and images. The visualization system Aardvark combines these data modalities into composite visualizations. The tree-first visualization (left) shows the cell relationships as a node-link tree visualization, horizon charts show the time series data and image snippets display alongside the horizon charts. The time-series-first visualization (top right) shows the time-series data as line charts with images and cell relationships superimposed. Finally, the image-first visualization (bottom right) shows the full microscopy image, with cell movement and relationships superimposed.","keywords":["Visualization, Cell Microscopy, View Composition"],"open_access_supplemental_link":"https://osf.io/3f6kr/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/cdbm6","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1232/v-full-1232_Preview.mp4?token=LCnEMjUCrDIXrJNKZSfQPb2d5F8T_eckC8Qqrdxmxa0&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"awards2","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Best Full Papers","session_uid":"v-full","session_youtube_ff_id":"5kVue1ySnOk","session_youtube_ff_link":"https://youtu.be/5kVue1ySnOk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/d-eG7NRcrKg&t=0h19m12s","sessions":["Best Full Papers"],"time_stamp":"2024-10-15T16:25:00Z","title":"Aardvark: Composite Visualizations of Trees, Time-Series, and Images","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1332","abstract":"Translating natural language to visualization (NL2VIS) has shown great promise for visual data analysis, but it remains a challenging task that requires multiple low-level implementations, such as natural language processing and visualization design. Recent advancements in pre-trained large language models (LLMs) are opening new avenues for generating visualizations from natural language. However, the lack of a comprehensive and reliable benchmark hinders our understanding of LLMs\u2019 capabilities in visualization generation. In this paper, we address this gap by proposing a new NL2VIS benchmark called VisEval. Firstly, we introduce a high-quality and large-scale dataset. 
This dataset includes 2,524 representative queries covering 146 databases, paired with accurately labeled ground truths. Secondly, we advocate for a comprehensive automated evaluation methodology covering multiple dimensions, including validity, legality, and readability. By systematically scanning for potential issues with a number of heterogeneous checkers, VisEval provides reliable and trustworthy evaluation outcomes. We run VisEval on a series of state-of-the-art LLMs. Our evaluation reveals prevalent challenges and delivers essential insights for future advancements.","accessible_pdf":false,"authors":[{"affiliations":["Microsoft Research, Shanghai, China"],"email":"christy05.chen@gmail.com","is_corresponding":true,"name":"Nan Chen"},{"affiliations":["Microsoft Research, Shanghai, China"],"email":"scottyugochang@gmail.com","is_corresponding":false,"name":"Yuge Zhang"},{"affiliations":["Microsoft Research, Shanghai, China"],"email":"jiahangxu@microsoft.com","is_corresponding":false,"name":"Jiahang Xu"},{"affiliations":["ShanghaiTech University, Shanghai, China"],"email":"rk.ren@outlook.com","is_corresponding":false,"name":"Kan Ren"},{"affiliations":["Microsoft Research, Shanghai, China"],"email":"yuqyang@microsoft.com","is_corresponding":false,"name":"Yuqing Yang"}],"award":"best","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1332","image_caption":"Examples of visualization issues detected by VisEval: Llama (CodeLlama-7B) produces code that cannot be executed, while Gemini (Gemini-Pro) incorrectly maps the \"sum of Tonnage\" to the y-axis instead of \"count\" and lacks a legend for the \"Cargo ship\" color. GPT-3.5 fails to sort as specified and places the legend outside the canvas. Although GPT-4 almost meets the requirements, it still encounters overflow issues that impact readability.","keywords":["Visualization evaluation, automatic visualization, large language models, benchmark"],"open_access_supplemental_link":"https://github.com/microsoft/VisEval","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.00981","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1332/v-full-1332_Preview.mp4?token=tbykaWmlhAAS8qHK-3sM9HAod8Q6W5G5TKF9TC0sT64&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"awards2","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Best Full Papers","session_uid":"v-full","session_youtube_ff_id":"lKkg-pUufh8","session_youtube_ff_link":"https://youtu.be/lKkg-pUufh8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/d-eG7NRcrKg&t=0h36m12s","sessions":["Best Full Papers"],"time_stamp":"2024-10-15T16:40:00Z","title":"VisEval: A Benchmark for Data Visualization in the Era of Large Language Models","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1802","abstract":"In the biomedical domain, visualizing the document embeddings of an extensive corpus has been widely used in information-seeking tasks. However, three key challenges with existing visualizations make it difficult for clinicians to find information efficiently. 
First, the document embeddings used in these visualizations are generated statically by pretrained language models, which cannot adapt to the user's evolving interest. Second, existing document visualization techniques cannot effectively display how the documents are relevant to users\u2019 interest, making it difficult for users to identify the most pertinent information. Third, existing embedding generation and visualization processes suffer from a lack of interpretability, making it difficult to understand, trust and use the result for decision-making. In this paper, we present a novel visual analytics pipeline for user-driven document representation and iterative information seeking (VADIS). VADIS introduces a prompt-based attention model (PAM) that generates dynamic document embedding and document relevance adjusted to the user's query. To effectively visualize these two pieces of information, we design a new document map that leverages a circular grid layout to display documents based on both their relevance to the query and the semantic similarity. Additionally, to improve the interpretability, we introduce a corpus-level attention visualization method to improve the user's understanding of the model focus and to enable the users to identify potential oversight. This visualization, in turn, empowers users to refine, update and introduce new queries, thereby facilitating a dynamic and iterative information-seeking experience. We evaluated VADIS quantitatively and qualitatively on a real-world dataset of biomedical research papers to demonstrate its effectiveness.","accessible_pdf":false,"authors":[{"affiliations":["Ohio State University, Columbus, United States"],"email":"qiu.580@buckeyemail.osu.edu","is_corresponding":true,"name":"Rui Qiu"},{"affiliations":["The Ohio State University, Columbus, United States"],"email":"tu.253@osu.edu","is_corresponding":false,"name":"Yamei Tu"},{"affiliations":["Washington University School of Medicine in St. Louis, St. Louis, United States"],"email":"yenp@wustl.edu","is_corresponding":false,"name":"Po-Yin Yen"},{"affiliations":["The Ohio State University , Columbus , United States"],"email":"hwshen@cse.ohio-state.edu","is_corresponding":false,"name":"Han-Wei Shen"}],"award":"best","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1802","image_caption":"Traditional document maps cluster documents based on static embeddings, leading to confusing grouping with inconsistent semantic concepts. We propose Prompt-based Attention Model (PAM) that generates prompt-specific document representations to better align with human interest. Recognizing that not all documents are equally relevant to a user\u2019s specific interest, we present Relevance-preserving mapping to project documents based on both their relevance to the user\u2019s interest, and their inter-similarity under user\u2019s interest. 
The mapping features a circular layout that centralizes the most pertinent documents, which aligns with both human\u2019s natural viewing pattern and the distribution of documents\u2019 relevance.","keywords":["Attention visualization, dynamic document representation, document visualization, biomedical information seeking"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1802/v-full-1802_Preview.mp4?token=LA7YlgNQGkiZLyHEnN17Ob983PPFPSfpWFr16hnzl8M&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1802/v-full-1802_Preview.srt?token=_kmNMLgzJ1-tuMdPqbZvT77tnne1hHmQvmqvnSs1RrA&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"awards2","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Best Full Papers","session_uid":"v-full","session_youtube_ff_id":"iafjQjWEHIY","session_youtube_ff_link":"https://youtu.be/iafjQjWEHIY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/d-eG7NRcrKg&t=0h52m38s","sessions":["Best Full Papers"],"time_stamp":"2024-10-15T16:55:00Z","title":"VADIS: A Visual Analytics Pipeline for Dynamic Document Representation and Information Seeking","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1880","abstract":"Merge trees are a valuable tool in the scientific visualization of scalar fields; however, current methods for merge tree comparisons are computationally expensive, primarily due to the exhaustive matching between tree nodes. To address this challenge, we introduce the Merge Tree Neural Network (MTNN), a learned neural network model designed for merge tree comparison. The MTNN enables rapid and high-quality similarity computation. We first demonstrate how to train graph neural networks, which emerged as effective encoders for graphs, in order to produce embeddings of merge trees in vector spaces for efficient similarity comparison. Next, we formulate the novel MTNN model that further improves the similarity comparisons by integrating the tree and node embeddings with a new topological attention mechanism. We demonstrate the effectiveness of our model on real-world data in different domains and examine our model\u2019s generalizability across various datasets. Our experimental analysis demonstrates our approach\u2019s superiority in accuracy and efficiency. 
In particular, we speed up the prior state-of-the-art by more than 100\u00d7 on the benchmark datasets while maintaining an error rate below 0.1%.","accessible_pdf":true,"authors":[{"affiliations":["Tulane University, New Orleans, United States"],"email":"yqin2@tulane.edu","is_corresponding":true,"name":"Yu Qin"},{"affiliations":["Montana State University, Bozeman, United States"],"email":"brittany.fasy@montana.edu","is_corresponding":false,"name":"Brittany Terese Fasy"},{"affiliations":["Tulane University, New Orleans, United States"],"email":"cwenk@tulane.edu","is_corresponding":false,"name":"Carola Wenk"},{"affiliations":["Tulane University, New Orleans, United States"],"email":"bsumma@tulane.edu","is_corresponding":false,"name":"Brian Summa"}],"award":"best","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1880","image_caption":"Merge tree comparisons are essential in scientific visualization but are often limited by the slow, computationally heavy process of matching tree nodes. Our Merge Tree Neural Network (MTNN) transforming merge tree comparison into a learning task. This innovation significantly reduces computation time by over 100 times, while maintaining near-perfect accuracy. MTNN stands out as a powerful tool for efficient and precise scientific visualization.","keywords":["computational topology, merge trees, graph neural networks"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2404.05879","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1880/v-full-1880_Preview.mp4?token=g3OGwgih9TpwR_wddxmFWU_U55zB38PzucsWWRcn5iY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1880/v-full-1880_Preview.srt?token=MbBPP-6mqwzqJUOgoWyt9bGZXNkqqkjKnD4AxqMvjJg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"awards2","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Best Full Papers","session_uid":"v-full","session_youtube_ff_id":"5x_3_xJ0xKc","session_youtube_ff_link":"https://youtu.be/5x_3_xJ0xKc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/d-eG7NRcrKg&t=1h9m8s","sessions":["Best Full Papers"],"time_stamp":"2024-10-15T17:10:00Z","title":"Rapid and Precise Topological Comparison with Merge Tree Neural Networks","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-10091124","abstract":"The Internet of Food (IoF) is an emerging field in smart foodsheds, involving the creation of a knowledge graph (KG) about the environment, agriculture, food, diet, and health. However, the heterogeneity and size of the KG present challenges for downstream tasks, such as information retrieval and interactive exploration. To address those challenges, we propose an interactive knowledge and learning environment (IKLE) that integrates three programming and modeling languages to support multiple downstream tasks in the analysis pipeline. To make IKLE easier to use, we have developed algorithms to automate the generation of each language. 
In addition, we collaborated with domain experts to design and develop a dataflow visualization system, which embeds the automatic language generations into components and allows users to build their analysis pipeline by dragging and connecting components of interest. We have demonstrated the effectiveness of IKLE through three real-world case studies in smart foodsheds.","accessible_pdf":true,"authors":[{"affiliations":"","email":"tu.253@osu.edu","is_corresponding":false,"name":"Yamei Tu"},{"affiliations":"","email":"wang.5502@osu.edu","is_corresponding":true,"name":"Xiaoqi Wang"},{"affiliations":"","email":"qiu.580@osu.edu","is_corresponding":false,"name":"Rui Qiu"},{"affiliations":"","email":"hwshen@cse.ohio-state.edu","is_corresponding":false,"name":"Han-Wei Shen"},{"affiliations":"","email":"mmmille6@wisc.edu","is_corresponding":false,"name":"Michelle Miller"},{"affiliations":"","email":"jinmeng.rao@wisc.edu","is_corresponding":false,"name":"Jinmeng Rao"},{"affiliations":"","email":"song.gao@wisc.edu","is_corresponding":false,"name":"Song Gao"},{"affiliations":"","email":"prhuber@ucdavis.edu","is_corresponding":false,"name":"Patrick R. Huber"},{"affiliations":"","email":"adhollander@ucdavis.edu","is_corresponding":false,"name":"Allan D. Hollander"},{"affiliations":"","email":"matthew@ic-foods.org","is_corresponding":false,"name":"Matthew Lange"},{"affiliations":"","email":"cgarcia@tacc.utexas.edu","is_corresponding":false,"name":"Christian R. Garcia"},{"affiliations":"","email":"jstubbs@tacc.utexas.edu","is_corresponding":false,"name":"Joe Stubbs"}],"award":"","doi":"10.1109/MCG.2023.3263960","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"10091124","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-10091124","image_caption":"(A) We propose an interactive knowledge and learning environment (IKLE) that integrates three programming and modeling languages to support multiple downstream tasks in the analysis pipeline. To make IKLE easier to use, we have developed algorithms to automate the generation of each language. In addition, we collaborated with domain experts to design and develop a dataflow visualization system, which embeds the automatic language generations into components and allows users to build their analysis pipeline by dragging and connecting components of interest. 
(B) the overview of our IKLE and its architecture.","keywords":["Learning Environment, Interactive Learning Environments, Programming Language, Visual System, Analysis Pipeline, Patterns In Data, Flow Data, Human-computer Interaction, Food Systems, Information Retrieval, Domain Experts, Language Model, Automatic Generation, Interactive Exploration, Cyberinfrastructure, Pre-trained Language Models, Resource Description Framework, SPARQL Query, DBpedia, Entity Types, Data Visualization, Resilience Analysis, Load Data, Query Results, Supply Chain, Network Flow"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://icicle.osu.edu/sites/default/files/2023-04/An_Interactive_Knowledge_and_Learning_Environment_in_Smart_Foodsheds.pdf","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10091124/v-cga-10091124_Preview.mp4?token=AohQXqEwVCpW2S-BqidwwXDKQKJWz4bjQz4U8-PS4dA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10091124/v-cga-10091124_Preview.srt?token=QpVRyv6MAkpWAPCkp9OJ9zeW0CdQNCxWZuUc7UdI1f8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga1","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Analytics and Applications","session_uid":"v-cga","session_youtube_ff_id":"g_5lfaP_5eQ","session_youtube_ff_link":"https://youtu.be/g_5lfaP_5eQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-4EH4hLDfyA&t=0h37m23s","sessions":["CG&A: Analytics and Applications"],"time_stamp":"2024-10-16T16:36:00Z","title":"An Interactive Knowledge and Learning Environment in Smart Foodsheds","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-10198358","abstract":"Set visualization facilitates the exploration and analysis of set-type data. However, how sets should be visualized when the data are uncertain is still an open research challenge. To address the problem of depicting uncertainty in set visualization, we ask 1) which aspects of set type data can be affected by uncertainty and 2) which characteristics of uncertainty influence the visualization design. We answer these research questions by first describing a conceptual framework that brings together 1) the information that is primarily relevant in sets (i.e., set membership, set attributes, and element attributes) and 2) different plausible categories of (un)certainty (i.e., certainty, undefined uncertainty as a binary fact, and defined uncertainty as quantifiable measure). Following the structure of our framework, we systematically discuss basic visualization examples of integrating uncertainty in set visualizations. 
We draw on existing knowledge about general uncertainty visualization and previous evidence of its effectiveness.","accessible_pdf":true,"authors":[{"affiliations":"","email":"christian.tominski@uni-rostock.de","is_corresponding":false,"name":"Christian Tominski"},{"affiliations":"","email":"m.behrisch@uu.nl","is_corresponding":true,"name":"Michael Behrisch"},{"affiliations":"","email":"susanne.bleisch@fhnw.ch","is_corresponding":false,"name":"Susanne Bleisch"},{"affiliations":"","email":"sara.fabrikant@geo.uzh.ch","is_corresponding":false,"name":"Sara Irina Fabrikant"},{"affiliations":"","email":"eva.mayr@donau-uni.ac.at","is_corresponding":false,"name":"Eva Mayr"},{"affiliations":"","email":"miksch@ifs.tuwien.ac.at","is_corresponding":false,"name":"Silvia Miksch"},{"affiliations":"","email":"helen.purchase@monash.edu","is_corresponding":false,"name":"Helen Purchase"}],"award":"","doi":"10.1109/MCG.2023.3300441","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"10198358","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-10198358","image_caption":"Visualizing uncertainty in set-type data is crucial for accurate analysis and decision-making. This work introduces a framework that categorizes data characteristics and types of uncertainty, providing strategies for integrating uncertainty into visualizations. By addressing set membership, set attributes, and element attributes, the framework helps design effective visual representations that communicate both data and its inherent uncertainties. This approach not only aids in understanding complex datasets but also enhances decision-making in various applications, from academic course planning to complex scenarios like ensemble forecasting and gene mapping.","keywords":["Uncertainty, Data Visualization, Measurement Uncertainty, Visual Analytics, Terminology, Task Analysis, Surveys, Conceptual Framework, Cardinality, Data Visualization, Visual Representation, Measure Of The Amount, Set Membership, Intersection Set, Visual Design, Different Types Of Uncertainty, Missing Values, Visual Methods, Fuzzy Set, Age Of Students, Color Values, Uncertainty Values, Explicit Representation, Aggregate Value, Exact Information, Uncertain Information, Table Cells, Temporal Uncertainty, Uncertain Data, Representation Of Uncertainty, Implicit Representation, Spatial Uncertainty, Point Symbol, Visual Clutter, Color Hue, Graphical Elements, Uncertain Value"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2302.11575","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10198358/v-cga-10198358_Preview.mp4?token=Mgq_Nnpy6uatRSqLhcqCj4lNl75aRjlj9BMREc1eD9Q&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10198358/v-cga-10198358_Preview.srt?token=W8A4RCOde5v9L6Pp5c4soF_xKoPB1ZvUhyZZpbB29kw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga1","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Analytics and 
Applications","session_uid":"v-cga","session_youtube_ff_id":"nFYQtRmiwzM","session_youtube_ff_link":"https://youtu.be/nFYQtRmiwzM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-4EH4hLDfyA&t=0h48m58s","sessions":["CG&A: Analytics and Applications"],"time_stamp":"2024-10-16T16:48:00Z","title":"Visualizing Uncertainty in Sets","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-10227838","abstract":"We report a study investigating the viability of using interactive visualizations to aid architectural design with building codes. While visualizations have been used to support general architectural design exploration, existing computational solutions treat building codes as separate from, rather than part of, the design process, creating challenges for architects. Through a series of participatory design studies with professional architects, we found that interactive visualizations have promising potential to aid design exploration and sensemaking in early stages of architectural design by providing feedback about potential allowances and consequences of design decisions. However, implementing a visualization system necessitates addressing the complexity and ambiguity inherent in building codes. To tackle these challenges, we propose various user-driven knowledge management mechanisms for integrating, negotiating, interpreting, and documenting building code rules.","accessible_pdf":false,"authors":[{"affiliations":"","email":"snowak@sfu.ca","is_corresponding":true,"name":"Stan Nowak"},{"affiliations":"","email":"bon.aseniero@autodesk.com","is_corresponding":false,"name":"Bon Adriel Aseniero"},{"affiliations":"","email":"lyn@sfu.ca","is_corresponding":false,"name":"Lyn Bartram"},{"affiliations":"","email":"tovi@dgp.toronto.edu","is_corresponding":false,"name":"Tovi Grossman"},{"affiliations":"","email":"George.fitzmaurice@autodesk.com","is_corresponding":false,"name":"George Fitzmaurice"},{"affiliations":"","email":"justin.matejka@autodesk.com","is_corresponding":false,"name":"Justin Matejka"}],"award":"","doi":"10.1109/MCG.2023.3307971","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"10227838","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-10227838","image_caption":"Design probes exploring information visualization and broader interactive systems solutions to help architects design with building codes.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10227838/v-cga-10227838_Preview.mp4?token=kj-oA3y_pasQYoWCZtyXzfvynLKMBJFDKneU9PbgfLk&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10227838/v-cga-10227838_Preview.srt?token=LMmZ-ChkH0tezmvWvNWVuorm1os61SHGICy2iBqjtp8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga1","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Analytics and Applications","session_uid":"v-cga","session_youtube_ff_id":"uquxa5bjs8I","session_youtube_ff_link":"https://youtu.be/uquxa5bjs8I","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-4EH4hLDfyA&t=1h2m1s","sessions":["CG&A: Analytics and 
Applications"],"time_stamp":"2024-10-16T17:00:00Z","title":"Identifying Visualization Opportunities to Help Architects Manage the Complexity of Building Codes","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-9612019","abstract":"The number of online news articles available nowadays is rapidly increasing. When exploring articles on online news portals, navigation is mostly limited to the most recent ones. The spatial context and the history of topics are not immediately accessible. To support readers in the exploration or research of articles in large datasets, we developed an interactive 3D globe visualization. We worked with datasets from multiple online news portals containing up to 45,000 articles. Using agglomerative hierarchical clustering, we represent the referenced locations of news articles on a globe with different levels of detail. We employ two interaction schemes for navigating the viewpoint on the visualization, including support for hand-held devices and desktop PCs, and provide search functionality and interactive filtering. Based on this framework, we explore additional modules for jointly exploring the spatial and temporal domain of the dataset and incorporating live news into the visualization.","accessible_pdf":false,"authors":[{"affiliations":"","email":"nicholas.ingulfsen@gmail.com","is_corresponding":false,"name":"Nicholas Ingulfsen"},{"affiliations":"","email":"simone.schaub@visinf.tu-darmstadt.de","is_corresponding":false,"name":"Simone Schaub-Meyer"},{"affiliations":"","email":"grossm@inf.ethz.ch","is_corresponding":false,"name":"Markus Gross"},{"affiliations":"","email":"tobias.guenther@fau.de","is_corresponding":true,"name":"Tobias G\u00fcnther"}],"award":"","doi":"10.1109/MCG.2021.3127434","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"9612019","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-9612019","image_caption":"Most news websites provide access to only the most recent articles and offer no support to explore the temporal evolution of news. Further, many articles contain the names of places, which would allow to geolocalize and cluster news. With news globe, we provide a visualization system that gives readers the means to explore both the spatial and temporal dimension in a georeferenced context. 
","keywords":["News Articles, Number Of Articles, Headlines, Interactive Visualization, Online News, Agglomerative Clustering, Local News, Interactive Exploration, Desktop PC, Different Levels Of Detail, News Portals, Spatial Information, User Study, 3D Space, Human-computer Interaction, Temporal Information, Third Dimension, Tablet Computer, Pie Chart, News Stories, 3D Visualization, Article Details, Visual Point, Bottom Of The Screen, Geospatial Data, Type Of Visualization, Largest Dataset, Tagging Location, Live Feed"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-9612019/v-cga-9612019_Preview.mp4?token=GkTT3Eg_EbilgJj-ejZQBPP0XWewvUFObcmJeT41RPs&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-9612019/v-cga-9612019_Preview.srt?token=SbBNukZ73Zn3zPtlouAYThhFOuvs58j8-yjM0sxCOzY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga1","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Analytics and Applications","session_uid":"v-cga","session_youtube_ff_id":"lL3SWpaLWQs","session_youtube_ff_link":"https://youtu.be/lL3SWpaLWQs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-4EH4hLDfyA&t=0h9m58s","sessions":["CG&A: Analytics and Applications"],"time_stamp":"2024-10-16T16:12:00Z","title":"News Globe: Visualization of Geolocalized News Articles","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-9745375","abstract":"We consider the general problem known as job shop scheduling, in which multiple jobs consist of sequential operations that need to be executed or served by appropriate machines having limited capacities. For example, train journeys (jobs) consist of moves and stops (operations) to be served by rail tracks and stations (machines). A schedule is an assignment of the job operations to machines and times where and when they will be executed. The developers of computational methods for job scheduling need tools enabling them to explore how their methods work. At a high level of generality, we define the system of pertinent exploration tasks and a combination of visualizations capable of supporting the tasks. We provide general descriptions of the purposes, contents, visual encoding, properties, and interactive facilities of the visualizations and illustrate them with images from an example implementation in air traffic management. We justify the design of the visualizations based on the tasks, principles of creating visualizations for pattern discovery, and scalability requirements. 
The outcomes of our research are sufficiently general to be of use in a variety of applications.","accessible_pdf":false,"authors":[{"affiliations":"","email":"gennady.andrienko@iais.fraunhofer.de","is_corresponding":true,"name":"Gennady Andrienko"},{"affiliations":"","email":"natalia.andrienko@iais.fraunhofer.de","is_corresponding":false,"name":"Natalia Andrienko"},{"affiliations":"","email":"jmcordero@e-crida.enaire.es","is_corresponding":false,"name":"Jose Manuel Cordero Garcia"},{"affiliations":"","email":"dirk.hecker@iais.fraunhofer.de","is_corresponding":false,"name":"Dirk Hecker"},{"affiliations":"","email":"georgev@unipi.gr","is_corresponding":false,"name":"George A. Vouros"}],"award":"","doi":"10.1109/MCG.2022.3163437","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"9745375","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-9745375","image_caption":"Example of a schedule view showing three versions of a schedule","keywords":["Visualization, Schedules, Task Analysis, Optimization, Job Shop Scheduling, Data Analysis, Processor Scheduling, Iterative Methods"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://openaccess.city.ac.uk/id/eprint/28062/","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-9745375/v-cga-9745375_Preview.mp4?token=Fdm_ecxXarixBi1Z9WgSBNT5yfnA0tluZk-dbQApEz4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-9745375/v-cga-9745375_Preview.srt?token=2W4yg6vklLtn31kpj4gAMAAeNRrh_cCPFAD5iM9OjKo&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga1","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Analytics and Applications","session_uid":"v-cga","session_youtube_ff_id":"wj0IQ4MZIGs","session_youtube_ff_link":"https://youtu.be/wj0IQ4MZIGs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-4EH4hLDfyA&t=0h1m18s","sessions":["CG&A: Analytics and Applications"],"time_stamp":"2024-10-16T16:00:00Z","title":"Supporting Visual Exploration of Iterative Job Scheduling","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-9866547","abstract":"In many applications, developed deep-learning models need to be iteratively debugged and refined to improve the model efficiency over time. Debugging some models, such as temporal multilabel classification (TMLC) where each data point can simultaneously belong to multiple classes, can be especially more challenging due to the complexity of the analysis and instances that need to be reviewed. In this article, focusing on video activity recognition as an application of TMLC, we propose DETOXER, an interactive visual debugging system to support finding different error types and scopes through providing multiscope explanations.","accessible_pdf":false,"authors":[{"affiliations":"","email":"m.nourani@northeastern.edu","is_corresponding":true,"name":"Mahsan Nourani"},{"affiliations":"","email":"chiradeep.roy@utdallas.edu","is_corresponding":false,"name":"Chiradeep Roy"},{"affiliations":"","email":"dhoneycutt@ufl.edu","is_corresponding":false,"name":"Donald R. Honeycutt"},{"affiliations":"","email":"eragan@ufl.edu","is_corresponding":false,"name":"Eric D. 
Ragan"},{"affiliations":"","email":"vibhav.gogate@utdallas.edu","is_corresponding":false,"name":"Vibhav Gogate"}],"award":"","doi":"10.1109/MCG.2022.3201465","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"9866547","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-9866547","image_caption":"Overview of DETOXER, a visual (de)bugging (to)ol with Multi-Scope E(x)planations for (er)ror detection in Temporal Multi-Label Classification. In the center, a video is selected for exploration. Directly under the progress bar, heatmaps demonstrate the model\u2019s confidence for any given label per second (frame-level explanations)-(C). On the left, available videos are shown; for each video, the tool shows top-5 detected labels (A) and the rate of FP and FN errors (B) in the video (video-level explanations). The selected video is emphasized with a blue background. On the right, a global information panel displays model performance metrics (D) and object-specific FN and FP error rates in two vertically adjacent bar charts (E) (Global-level explanations).","keywords":["Debugging, Analytical Models, Heating Systems, Data Models, Computational Modeling, Activity Recognition, Deep Learning, Multi Label Classification, Visualization Tool, Temporal Classification, Visual Debugging, False Positive, False Negative, Active Components, Deep Learning Models, Types Of Errors, Video Frames, Error Detection, Detection Of Types, Action Recognition, Interactive Visualization, Sequence Of Points, Design Goals, Positive Errors, Critical Outcomes, Error Patterns, Global Panel, False Negative Rate, False Positive Rate, Heatmap, Visual Approach, Truth Labels, True Positive, Confidence Score, Anomaly Detection, Interface Elements"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-9866547/v-cga-9866547_Preview.mp4?token=B9Wp229HHlCcfq9D9Qr7a55Xu9ESLWtT3VHomkFEYTg&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-9866547/v-cga-9866547_Preview.srt?token=tPdcwmd1QEjEtJ9-F3m_oFN3MZGg13Pzbvi5ghWN3Ns&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga1","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Analytics and Applications","session_uid":"v-cga","session_youtube_ff_id":"6eBUBzR5Zlc","session_youtube_ff_link":"https://youtu.be/6eBUBzR5Zlc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-4EH4hLDfyA&t=0h23m32s","sessions":["CG&A: Analytics and Applications"],"time_stamp":"2024-10-16T16:24:00Z","title":"DETOXER: A Visual Debugging Tool With Multiscope Explanations for Temporal Multilabel Classification","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-10078374","abstract":"Existing dynamic weighted graph visualization approaches rely on users\u2019 mental comparison to perceive temporal evolution of dynamic weighted graphs, hindering users from effectively analyzing changes across multiple timeslices. We propose DiffSeer, a novel approach for dynamic weighted graph visualization by explicitly visualizing the differences of graph structures (e.g., edge weight differences) between adjacent timeslices. 
Specifically, we present a novel nested matrix design that overviews the graph structure differences over a time period as well as shows graph structure details in the timeslices of user interest. By collectively considering the overall temporal evolution and structure details in each timeslice, an optimization-based node reordering strategy is developed to group nodes with similar evolution patterns and highlight interesting graph structure details in each timeslice. We conducted two case studies on real-world graph datasets and in-depth interviews with 12 target users to evaluate DiffSeer. The results demonstrate its effectiveness in visualizing dynamic weighted graphs.","accessible_pdf":false,"authors":[{"affiliations":"","email":"wenxiaolin@stu.scu.edu.cn","is_corresponding":true,"name":"Xiaolin Wen"},{"affiliations":"","email":"yongwang@smu.edu.sg","is_corresponding":false,"name":"Yong Wang"},{"affiliations":"","email":"wumeixuan@stu.scu.edu.cn","is_corresponding":false,"name":"Meixuan Wu"},{"affiliations":"","email":"wangfengjie@stu.scu.edu.cn","is_corresponding":false,"name":"Fengjie Wang"},{"affiliations":"","email":"xuanwu.yue@connect.ust.hk","is_corresponding":false,"name":"Xuanwu Yue"},{"affiliations":"","email":"shenqm@sustech.edu.cn","is_corresponding":false,"name":"Qiaomu Shen"},{"affiliations":"","email":"mayx@sustech.edu.cn","is_corresponding":false,"name":"Yuxin Ma"},{"affiliations":"","email":"zhumin@scu.edu.cn","is_corresponding":false,"name":"Min Zhu"}],"award":"","doi":"10.1109/MCG.2023.3248289","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"10078374","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-10078374","image_caption":"Overview of DiffSeer: We focus on explicitly visualizing the differences between adjacent timeslices to support the analysis of the dynamic weighted graph evolution over a long time. Specifically, we proposed a nested matrix design, including (A) an overview matrix to provide a visual summary of differences and two types (B, C) of detail matrices to enable interactive inspection of graph details on demand. 
An optimization- based node reordering strategy is incorporated in the nested matrix design to group together nodes with similar evolution patterns and highlight interesting graph structure details in each timeslice.","keywords":["Visibility Graph, Spatial Patterns, Weight Change, In-depth Interviews, Temporal Changes, Temporal Evolution, Negative Changes, Interesting Patterns, Edge Weights, Real-world Datasets, Graph Structure, Visual Approach, Dynamic Visualization, Dynamic Graph, Financial Networks, Graph Datasets, Similar Evolutionary Patterns, User Interviews, Similar Changes, Chinese New Year, Sector Indices, Original Graph, Red Rectangle, Nodes In Order, Stock Market Crash, Stacked Bar Charts, Different Types Of Matrices, Chinese New, Blue Rectangle"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2302.07609","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10078374/v-cga-10078374_Preview.mp4?token=GPwpi68jEh0kOeIhl55NTOpGkzJSkquipYwccv1aRxY&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Systems, Theory, and Evaluations","session_uid":"v-cga","session_youtube_ff_id":"YpfkEg3bHfE","session_youtube_ff_link":"https://youtu.be/YpfkEg3bHfE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Fy9c_xgh_I8&t=0h0m4s","sessions":["CG&A: Systems, Theory, and Evaluations"],"time_stamp":"2024-10-17T16:00:00Z","title":"DiffSeer: Difference-Based Dynamic Weighted Graph Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-10128890","abstract":"Some 15 years ago, Visualization Viewpoints published an influential article titled Rainbow Color Map (Still) Considered Harmful (Borland and Taylor, 2007). The paper argued that the \u201crainbow colormap\u2019s characteristics of confusing the viewer, obscuring the data and actively misleading interpretation make it a poor choice for visualization.\u201d Subsequent articles often repeat and extend these arguments, so much so that avoiding rainbow colormaps, along with their derivatives, has become dogma in the visualization community. Despite this loud and persistent recommendation, scientists continue to use rainbow colormaps. Have we failed to communicate our message, or do rainbow colormaps offer advantages that have not been fully appreciated? We argue that rainbow colormaps have properties that are underappreciated by existing design conventions. We explore key critiques of the rainbow in the context of recent research to understand where and how rainbows might be misunderstood. 
Choosing a colormap is a complex task, and rainbow colormaps can be useful for selected applications.","accessible_pdf":false,"authors":[{"affiliations":"","email":"cware@ccom.unh.edu","is_corresponding":false,"name":"Colin Ware"},{"affiliations":"","email":"mstone@acm.org","is_corresponding":true,"name":"Maureen Stone"},{"affiliations":"","email":"danielle.szafir@cs.unc.edu","is_corresponding":false,"name":"Danielle Albers Szafir"}],"award":"","doi":"10.1109/MCG.2023.3246111","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"10128890","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-10128890","image_caption":"Rainbow colormaps have long been criticized, especially when shape-from-shading is required (upper left). But domain experts continue to use them, especially for highlighting specific values and global patterns (lower left). Classic rainbows have uneven hue distribution and erratic luminance profiles. But it is possible to craft rainbow colormaps that avoid these problems. (upper right).Placing hues on key values can create a useful \u201ccolor ruler.\u201d (lower right) We understand well enough why rainbows can be bad; let us instead work to find out when and why they are good. ","keywords":["Image Color Analysis, Semantics, Data Visualization, Estimation, Reliability Engineering"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10128890/v-cga-10128890_Preview.mp4?token=MdjcNS3-czC61d_9XyQpByoEmEXu9H8Re4F1GSKFbiw&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Systems, Theory, and Evaluations","session_uid":"v-cga","session_youtube_ff_id":"LJhB4o315nU","session_youtube_ff_link":"https://youtu.be/LJhB4o315nU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Fy9c_xgh_I8&t=0h11m29s","sessions":["CG&A: Systems, Theory, and Evaluations"],"time_stamp":"2024-10-17T16:12:00Z","title":"Rainbow Colormaps Are Not All Bad","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-10201383","abstract":"Although visualizations are a useful tool for helping people to understand information, they can also have unintended effects on human cognition. This is especially true for uncertain information, which is difficult for people to understand. Prior work has found that different methods of visualizing uncertain information can produce different patterns of decision making from users. However, uncertainty can also be represented via text or numerical information, and few studies have systematically compared these types of representations to visualizations of uncertainty. We present two experiments that compared visual representations of risk (icon arrays) to numerical representations (natural frequencies) in a wildfire evacuation task. Like prior studies, we found that different types of visual cues led to different patterns of decision making. In addition, our comparison of visual and numerical representations of risk found that people were more likely to evacuate when they saw visualizations than when they saw numerical representations. 
These experiments reinforce the idea that design choices are not neutral: seemingly minor differences in how information is represented can have important impacts on human risk perception and decision making.","accessible_pdf":true,"authors":[{"affiliations":"","email":"lematze@sandia.gov","is_corresponding":true,"name":"Laura E. Matzen"},{"affiliations":"","email":"bchowel@sandia.gov","is_corresponding":false,"name":"Breannan C. Howell"},{"affiliations":"","email":"mctrumb@sandia.gov","is_corresponding":false,"name":"Michael C. S. Trumbo"},{"affiliations":"","email":"kmdivis@sandia.gov","is_corresponding":false,"name":"Kristin M. Divis"}],"award":"","doi":"10.1109/MCG.2023.3299875","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"10201383","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-10201383","image_caption":"This figure shows stimuli from an experiment comparing two representations of probability: natural frequencies and icon arrays. Although these representations convey the same information, the visual cues provided by the icon arrays can change people's perception of the risk.","keywords":["Visualization, Uncertainty, Decision Making, Costs, Task Analysis, Laboratories, Information Analysis, Decision Making, Visual Representation, Numerical Representation, Decision Patterns, Deterministic, Risk Perception, Specific Information, Fundamental Frequency, Point Values, Representation Of Information, Risk Information, Visual Conditions, Numerous Conditions, Human Decision, Numerical Information, Impact Of Different Types, Uncertain Information, Type Of Visualization, Differences In Risk Perception, Representation Of Uncertainty, Increase In Participation, Participants In Experiment, Individual Difference Measures, Sandia National Laboratories, Risk Propensity, Bonus Payments, Average Response Time, Difference In Probability, Response Time"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10201383/v-cga-10201383_Preview.mp4?token=vTrP56YmWW_mcWhejPKjE3Jm7h8whXcyMJQ7mGIfOYg&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Systems, Theory, and Evaluations","session_uid":"v-cga","session_youtube_ff_id":"WMGfURPRFEg","session_youtube_ff_link":"https://youtu.be/WMGfURPRFEg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Fy9c_xgh_I8&t=0h37m8s","sessions":["CG&A: Systems, Theory, and Evaluations"],"time_stamp":"2024-10-17T16:36:00Z","title":"Numerical and Visual Representations of Uncertainty Lead to Different Patterns of Decision Making","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-10207831","abstract":"The membership function is to categorize quantities along with a confidence degree. This article investigates a generic user interaction based on this function for categorizing various types of quantities without modification, which empowers users to articulate uncertainty categorization and enhance their visual data analysis significantly. 
We present the technique design and an online prototype, supplementing with insights from three case studies that highlight the technique\u2019s efficacy among different types of quantities. Furthermore, we conduct a formal user study to scrutinize the process and reasoning users employ while utilizing our technique. The findings indicate that our technique can help users create customized categories. Both our code and the interactive prototype are made available as open-source resources, intended for application across varied domains as a generic tool.","accessible_pdf":true,"authors":[{"affiliations":"","email":"liuliqun.cs@gmail.com","is_corresponding":true,"name":"Liqun Liu"},{"affiliations":"","email":"romain.vuillemot@ec-lyon.fr","is_corresponding":false,"name":"Romain Vuillemot"}],"award":"","doi":"10.1109/MCG.2023.3301449","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"10207831","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-10207831","image_caption":"The illustration of an interactive membership function. Users can change the shape of the membership function by dragging the black points in (a) to adjust the range of the categories (Children, Youth, Adult, and Old). This interactive membership function helps users map the quantities (column Age) into categories (column Categories). The table in (b) shows the membership degrees derived from the membership function.","keywords":["Data Visualization, Uncertainty, Prototypes, Fuzzy Logic, Image Color Analysis, Fuzzy Sets, Open Source Software, General Function, Membership Function, User Study, Classification Process, Fuzzy Logic, Quantitative Values, Visualization Techniques, Amount Of Type, Fuzzy Theory, General Interaction, Temperature Dataset, Interaction Techniques, Carbon Dioxide, Computation Time, Rule Based, Web Page, Real World Scenarios, Fuzzy Set, Domain Experts, Supercritical CO 2, Parallel Coordinates, Fuzzy System, Fuzzy Clustering, Interactive Visualization, Amount Of Items, Large Scale Problems"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://hal.science/hal-04241000/document","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10207831/v-cga-10207831_Preview.mp4?token=bxQnsCspXz7hbX4rJka9sDjNpl6wHU5a4_ah1whqqos&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10207831/v-cga-10207831_Preview.srt?token=q77LkQdAuZz_5yVfqb8pfC-ovH7zm2jYr4K4qLxQw-o&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Systems, Theory, and Evaluations","session_uid":"v-cga","session_youtube_ff_id":"zfGGIlFz_-s","session_youtube_ff_link":"https://youtu.be/zfGGIlFz_-s","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Fy9c_xgh_I8&t=0h24m18s","sessions":["CG&A: Systems, Theory, and Evaluations"],"time_stamp":"2024-10-17T16:24:00Z","title":"A Generic Interactive Membership Function for Categorization of Quantities","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-10414267","abstract":"Traditional approaches to data visualization have often focused on comparing different subsets of data, and this is reflected 
in the many techniques developed and evaluated over the years for visual comparison. Similarly, common workflows for exploratory visualization are built upon the idea of users interactively applying various filter and grouping mechanisms in search of new insights. This paradigm has proven effective at helping users identify correlations between variables that can inform thinking and decision-making. However, recent studies show that consumers of visualizations often draw causal conclusions even when not supported by the data. Motivated by these observations, this article highlights recent advances from a growing community of researchers exploring methods that aim to directly support visual causal inference. However, many of these approaches have their own limitations, which limit their use in many real-world scenarios. This article, therefore, also outlines a set of key open challenges and corresponding priorities for new research to advance the state of the art in visual causal inference.","accessible_pdf":false,"authors":[{"affiliations":"","email":"borland@renci.org","is_corresponding":false,"name":"David Borland"},{"affiliations":"","email":"zeyuwang@cs.unc.edu","is_corresponding":false,"name":"Arran Zeyu Wang"},{"affiliations":"","email":"gotz@unc.edu","is_corresponding":false,"name":"David Gotz"}],"award":"","doi":"10.1109/MCG.2023.3338788","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"10414267","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-10414267","image_caption":"A counterfactual subset includes data points from the excluded set that closely resemble those in the included set. Previous research indicates that visualizations comparing the counterfactual subset with the included subset (c) lead to more accurate causal inferences than traditional methods (b). 
This work will share our vision for how counterfactual concepts developed by the causal inference community can be leveraged to enable the development of more effective visualization technologies.","keywords":["Analytical Models, Correlation, Visual Analytics, Decision Making, Data Visualization, Reliability Theory, Cognition, Inference Algorithms, Causal Inference, Causality, Social Media, Exploratory Analysis, Data Visualization, Visual Representation, Visual Analysis, Visualization Tool, Open Challenges, Interactive Visualization, Assembly Line, Different Subsets Of Data, Visual Analytics Tool, Data Driven Decision Making, Data Quality, Statistical Models, Causal Effect, Visual System, Use Of Social Media, Bar Charts, Causal Model, Causal Graph, Chart Types, Directed Acyclic Graph, Visual Design, Portion Of The Dataset, Causal Structure, Prior Section, Causal Explanations, Line Graph"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2401.08411","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10414267/v-cga-10414267_Preview.mp4?token=FVxCuB6xLp1DUV6Sm1Gh4cxqFbnyw2q5gHteaBAiyUw&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10414267/v-cga-10414267_Preview.srt?token=CY0_DiPO-Mt3EyALCu_X6qEaEG2xb9-IPY8DehrAEN0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Systems, Theory, and Evaluations","session_uid":"v-cga","session_youtube_ff_id":"N6USrLE8yfo","session_youtube_ff_link":"https://youtu.be/N6USrLE8yfo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Fy9c_xgh_I8&t=0h50m24s","sessions":["CG&A: Systems, Theory, and Evaluations"],"time_stamp":"2024-10-17T16:48:00Z","title":"Using Counterfactuals to Improve Causal Inferences From Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-10478355","abstract":"Recent developments in artificial intelligence (AI) and machine learning (ML) have led to the creation of powerful generative AI methods and tools capable of producing text, code, images, and other media in response to user prompts. Significant interest in the technology has led to speculation about what fields, including visualization, can be augmented or replaced by such approaches. However, there remains a lack of understanding about which visualization activities may be particularly suitable for the application of generative AI. Drawing on examples from the field, we map current and emerging capabilities of generative AI across the different phases of the visualization lifecycle and describe salient opportunities and challenges.","accessible_pdf":true,"authors":[{"affiliations":"","email":"rahul.basole@accenture.com","is_corresponding":false,"name":"Rahul C. 
Basole"},{"affiliations":"","email":"timothy.major@accenture.com","is_corresponding":true,"name":"Timothy Major"}],"award":"","doi":"10.1109/MCG.2024.3362168","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"10478355","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-10478355","image_caption":"The iterative phases of the end-to-end visualization workflow (A-G) and types of generative AI opportunities (Creativity, Co-Pilot, and Automation) within them.","keywords":["Generative AI, Art, Artificial Intelligence, Machine Learning, Visualization, Media, Augmented Reality, Machine Learning, Visual Representation, Professional Knowledge, Creative Process, Domain Experts, Generalization Capability, Development Of Artificial Intelligence, Artificial Intelligence Capabilities, Iterative Process, Natural Language, Commercial Software, Hallucinations, Team Sports, Design Requirements, Intelligence Agencies, Recommender Systems, User Requirements, Iterative Design, Use Of Artificial Intelligence, Visual Design, Phase Assemblage, Data Literacy"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10478355/v-cga-10478355_Preview.mp4?token=bntoA5E5ZfCGu-KJFGG0JyRkowZQF3EIwF7Kt-WhyqU&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10478355/v-cga-10478355_Preview.srt?token=Z5yTk4Adf9lkH6U9OqZvHtSDFifTio2hnkzEt1-bZuE&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Systems, Theory, and Evaluations","session_uid":"v-cga","session_youtube_ff_id":"UDI3JoGu2Qs","session_youtube_ff_link":"https://youtu.be/UDI3JoGu2Qs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Fy9c_xgh_I8&t=1h0m30s","sessions":["CG&A: Systems, Theory, and Evaluations"],"time_stamp":"2024-10-17T17:00:00Z","title":"Generative AI for Visualization: Opportunities and Challenges","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-biomedchallenge-2860","abstract":"Visualization of spatial datasets is essential for understanding biological systems that are composed of several interacting cell types. For example, gene expression data at the molecular level needs to be interpreted based on cell type, spatial context, tissue type, and interactions with the surrounding environment. Recent advances in spatial profiling technologies allow measurements of the level of thousands of proteins or genes at different spatial locations along with corresponding cellular composition. Representing such high dimensional data effectively to facilitate data interpretation is a major challenge. Existing methods such as spatially plotted pie or dot charts obscure underlying tissue regions and necessitate switching between different views for accurate interpretations. Here, we present TissuePlot, a novel method for visualizing spatial data at molecular, cellular and tissue levels in the context of their spatial locations. 
To this end, TissuePlot employs a transparent hexagon tessellation approach that utilizes object borders to represent cell composition or gene-level data without obscuring the underlying tissue image. Additionally, it offers a multi-view interactive web app, that allows interrogating spatial tissue data at multiple scales linking molecular information to tissue anatomy and motifs. We demonstrate TissuePlot utility using mouse brain data from the Bio+MedVis Redesign Challenge 2024. Our tool is accessible at https://sailem-group.github.io/TissuePlot.","accessible_pdf":false,"authors":[{"affiliations":["King's College London, London, United Kingdom"],"email":"heba.sailem@kcl.ac.uk","is_corresponding":true,"name":"Heba Zuhair Sailem"}],"award":"","doi":"","event_id":"a-biomedchallenge","event_title":"Bio+MedVis Challenges","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-biomedchallenge-2860","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Bio+Med+Vis Workshop","session_uid":"a-biomedchallenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/6sxFyy5SXQ0&t=1h9m14s","sessions":["Bio+Med+Vis Workshop"],"time_stamp":"2024-10-13T16:00:00Z","title":"TissuePlot: A Multi-Scale Interactive Web App For Visualizing Spatial Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-biomedchallenge-3099","abstract":"For the Bio+Med-Vis Challenge 2024, we propose a visual analytics system as a redesign for the scatter pie chart visualization of cell type proportions of spatial transcriptomics data. Our design uses three linked views: a view of the histological image of the tissue, a stacked bar chart showing cell type proportions of the spots, and a scatter plot showing a dimensionality reduction of the multivariate proportions. Furthermore, we apply a compositional data analysis framework, the Aitchison geometry, to the proportions for dimensionality reduction and k-means clustering. Leveraging brushing and linking, the system allows one to explore and uncover patterns in the cell type mixtures and relate them to their spatial locations on the cellular tissue. This redesign shifts the pattern recognition workload from the human visual system to computational methods commonly used in visual analytics. 
We provide the code and setup instructions of our visual analytics system on GitHub.(https://github.com/UniStuttgart-VISUS/va-for-spatial-transcriptomics)","accessible_pdf":false,"authors":[{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"david.haegele@visus.uni-stuttgart.de","is_corresponding":true,"name":"David H\u00e4gele"},{"affiliations":["University of Stuttgart , Stuttgart , Germany"],"email":"st189806@stud.uni-stuttgart.de","is_corresponding":false,"name":"Yuxuan Tang"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"weiskopf@visus.uni-stuttgart.de","is_corresponding":false,"name":"Daniel Weiskopf"}],"award":"","doi":"","event_id":"a-biomedchallenge","event_title":"Bio+MedVis Challenges","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-biomedchallenge-3099","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Bio+Med+Vis Workshop","session_uid":"a-biomedchallenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/6sxFyy5SXQ0&t=1h22m23s","sessions":["Bio+Med+Vis Workshop"],"time_stamp":"2024-10-13T16:00:00Z","title":"Visual Compositional Data Analytics for Spatial Transcriptomics","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-biomedchallenge-4384","abstract":"We introduce a novel method for overlaying cell type proportion data onto tissue images. This approach preserves spatial context while avoiding visual clutter or excessively obscuring the underlying slide. 
Our proposed technique involves clustering the data and aggregating neighboring points of the same cluster into polygons.","accessible_pdf":false,"authors":[{"affiliations":["NIH, Rockville, United States","Queen's University, Belfast, United Kingdom"],"email":"masonlk@nih.gov","is_corresponding":true,"name":"Lee Mason"},{"affiliations":["National Institutes of Health, Rockville, United States"],"email":"jonas.dealmeida@nih.gov","is_corresponding":false,"name":"Jonas S Almeida"}],"award":"","doi":"","event_id":"a-biomedchallenge","event_title":"Bio+MedVis Challenges","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-biomedchallenge-4384","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Bio+Med+Vis Workshop","session_uid":"a-biomedchallenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/6sxFyy5SXQ0&t=0h56m24s","sessions":["Bio+Med+Vis Workshop"],"time_stamp":"2024-10-13T16:00:00Z","title":"A Simplified Positional Cell Type Visualization using Spatially Aggregated Clusters","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-biomedchallenge-4393","abstract":"The 3D Cycled Immunofluorescence (CyCIF) technique produces high-resolution multiplexed images, often representing a large number of biomarkers. With current visualization tools, it is hard to identify the important subset of markers and locate notable regions within the tissue. To address this challenge, we propose an LLM-supported agent to navigate 3D CyCIF Imaging that interprets a novice user's natural language queries, identifies relevant markers, and locates significant regions within the tissue. 
Our results demonstrate the agent's ability to dynamically update views, answering various queries, from general questions to specific region-based requests.","accessible_pdf":false,"authors":[{"affiliations":["The University of Texas at Arlington, Arlington, United States"],"email":"acd9300@mavs.uta.edu","is_corresponding":true,"name":"Aarti Darji"},{"affiliations":["DBMI, Boston, United States"],"email":"ericmoerth@g.harvard.edu","is_corresponding":false,"name":"Eric Moerth"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"morgan_turner@hms.harvard.edu","is_corresponding":false,"name":"Morgan L Turner"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"david_kouril@hms.harvard.edu","is_corresponding":false,"name":"David Kou\u0159il"},{"affiliations":["The University of Texas at Arlington, Arlington, United States"],"email":"jacob.luber@uta.edu","is_corresponding":false,"name":"Jacob Luber"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"nils@hms.harvard.edu","is_corresponding":false,"name":"Nils Gehlenborg"}],"award":"","doi":"","event_id":"a-biomedchallenge","event_title":"Bio+MedVis Challenges","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-biomedchallenge-4393","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Bio+Med+Vis Workshop","session_uid":"a-biomedchallenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/6sxFyy5SXQ0&t=1h36m18s","sessions":["Bio+Med+Vis Workshop"],"time_stamp":"2024-10-13T16:00:00Z","title":"LLM - Supported Exploration of 3D Microscopy Imaging","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-biomedchallenge-8493","abstract":"The objective of the Redesign Challenge of the Bio+MedVis Challenge @ IEEE VIS 2024 is to redesign an existing visualization of multi-cell gene expressions of tissue samples. In this, multiple cells are accumulated into pixels. For each pixel the visualization should convey the prevalence and extent of cell types it is composed of, i.e., a proportional relation. The provided baseline technique of superimposed Pie charts -- a common technique for this kind of relation -- is not an ideal choice as the cell-type quantities of neighboring pixels are hard to compare due to a spatial disarray inherent to pie charts. This limits the perception of regions with coherent cell-type compositions, which constitutes one of the essential visual analytics tasks. We propose a novel marker design: \\emph{Droplets} -- a space-saving design for visually enhancing the presence of clusters and regional borders. 
We evaluate this concept for the given tissue sample and compare it to the given baseline and other alternatives.","accessible_pdf":false,"authors":[{"affiliations":["Graz University of Technology, Graz, Austria"],"email":"s.lengauer@cgv.tugraz.at","is_corresponding":true,"name":"Stefan Lengauer"},{"affiliations":["Graz University of Technology, Graz, Austria"],"email":"peter.waldert@cgv.tugraz.at","is_corresponding":false,"name":"Peter Waldert"},{"affiliations":["Graz University of Technology, Graz, Austria"],"email":"tobias.schreck@cgv.tugraz.at","is_corresponding":false,"name":"Tobias Schreck"}],"award":"","doi":"","event_id":"a-biomedchallenge","event_title":"Bio+MedVis Challenges","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-biomedchallenge-8493","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Bio+Med+Vis Workshop","session_uid":"a-biomedchallenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/6sxFyy5SXQ0&t=1h32m33s","sessions":["Bio+Med+Vis Workshop"],"time_stamp":"2024-10-13T16:00:00Z","title":"Droplets: A Marker Design for visually enhancing Local Cluster Association","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-biomedchallenge-9833","abstract":"Spatial transcriptomics methods capture cellular measurements such as gene expression and cell types at specific locations in a cell, helping provide a localized picture of tissue health. Traditional visualization techniques superimpose the tissue image with pie charts for the cell distribution. We design an interactive visual analysis system that addresses perceptual problems in the state of the art, while adding filtering, drilling, and clustering analysis capabilities. Our approach can help researchers gain deeper insights into the molecular mechanisms underlying complex biological processes within tissues.","accessible_pdf":false,"authors":[{"affiliations":["University of Illinois Chicago, Chicago, United States"],"email":"szhao69@uic.edu","is_corresponding":true,"name":"Siyuan Zhao"},{"affiliations":["University of Illinois at Chicago, Chicago, United States"],"email":"g.elisabeta.marai@gmail.com","is_corresponding":false,"name":"G. 
Elisabeta Marai"}],"award":"","doi":"","event_id":"a-biomedchallenge","event_title":"Bio+MedVis Challenges","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-biomedchallenge-9833","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Bio+Med+Vis Workshop","session_uid":"a-biomedchallenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/6sxFyy5SXQ0&t=1h42m41s","sessions":["Bio+Med+Vis Workshop"],"time_stamp":"2024-10-13T16:00:00Z","title":"A Part-to-Whole Circular Cell Explorer","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-vast-challenge-1002","abstract":"We present an interactive visual analysis tool to explore large dynamic graphs. Our system provides users with multiple perspectives to analyze the network. The graph view presents the node-link structure and offers various layout options. To complement, a temporal view shows both the overall temporal distribution and detailed event timelines. The system also supports flexible filtering to reduce the graph size and identify interesting entities. One bonus feature of our system is the provenance map, which visualizes the automatically captured user interactions and allows users to record their findings. 
The provenance map is helpful for organizing the exploration process and synthesizing analysis results.","accessible_pdf":false,"authors":[{"affiliations":["Peking University, Beijing, China","Peking University, Beijing, China"],"email":"yuhan.guo@pku.edu.cn","is_corresponding":true,"name":"Yuhan Guo"},{"affiliations":["Peking University, Beijing, China","Peking University, Beijing, China"],"email":"luoyuchu@pku.edu.cn","is_corresponding":false,"name":"Yuchu Luo"},{"affiliations":["Peking University, Beijing, China","Peking University, Beijing, China"],"email":"cxyapril@stu.pku.edu.cn","is_corresponding":false,"name":"Xinyue Chen"},{"affiliations":["Peking University, Beijing, China","Peking University, Beijing, China"],"email":"hanning.shao@pku.edu.cn","is_corresponding":false,"name":"Hanning Shao"},{"affiliations":["Peking University, Beijing, China","Peking University, Beijing, China"],"email":"xiaoru.yuan@pku.edu.cn","is_corresponding":false,"name":"Xiaoru Yuan"},{"affiliations":["University of Nottingham, Nottingham, United Kingdom","University of Nottingham, Nottingham, United Kingdom"],"email":"kai.xu@nottingham.ac.uk","is_corresponding":false,"name":"Kai Xu"}],"award":"","doi":"","event_id":"a-vast-challenge","event_title":"VAST Challenge","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-vast-challenge-1002","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest2","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"VAST Challenge","session_uid":"a-vast-challenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HhIQktS5HZs&t=2h2m54s","sessions":["VAST Challenge"],"time_stamp":"2024-10-13T12:30:00Z","title":"Visual Analysis of Complex Temporal Networks Supported by Analytic Provenance","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-vast-challenge-1006","abstract":"The exposure of illegal fishing by SouthSeafood Express Corp highlights the urgent need for better tools to monitor commercial fishing in Oceanus. In response, we develop an interactive visualization tool for the VAST Challenge\u2019s Mini-Challenge 2. Our system analyzes the CatchNet knowledge graph, combining vessel tracking and port records from FishEye International, a non-profit dedicated to combating illegal fishing. The tool links vessels to probable cargos, identifies seasonal trends, and detects anomalies in port records. 
Detects suspicious activity of vessels, offering actionable insights to aid investigations and prevent future illegal fishing.","accessible_pdf":false,"authors":[{"affiliations":["Getulio Vargas Foundation, Rio de Janeiro, Brazil"],"email":"jherediaparillo@gmail.com","is_corresponding":false,"name":"Juanpablo Andrew Heredia"},{"affiliations":["Get\u00falio Vargas Foundation, Rio de Janeiro, Brazil"],"email":"fabricio.venturim@fgv.edu.br","is_corresponding":false,"name":"Fabr\u00edcio Venturim"},{"affiliations":["Funda\u00e7\u00e3o Getulio Vargas, Rio de Janeiro, Brazil"],"email":"dany.diaz@ucsp.edu.pe","is_corresponding":false,"name":"Dany Mauro Diaz Espino"},{"affiliations":["FGV, Rio de Janeiro, Brazil"],"email":"felipe.moreno.vera@gmail.com","is_corresponding":false,"name":"Felipe Moreno-Vera"},{"affiliations":["Funda\u00e7\u00e3o Getulio Vargas, Rio de Janeiro, Brazil"],"email":"jpocom@gmail.com","is_corresponding":false,"name":"Jorge Poco"}],"award":"","doi":"","event_id":"a-vast-challenge","event_title":"VAST Challenge","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-vast-challenge-1006","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest2","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"VAST Challenge","session_uid":"a-vast-challenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VAST Challenge"],"time_stamp":"2024-10-13T12:30:00Z","title":"Prerecorded video (VAST Challenge submission ID 1004)","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-vast-challenge-1013","abstract":"This paper addresses the visualization challenges posed by Mini Challenge 3 of the VAST Challenge 2024, which involves detecting illegal fishing activities within a dynamic network of companies and individuals. The task requires effective anomaly detection in a time-dependent knowledge graph, a scenario where conventional graph visualization tools often fall short due to their limited ability to integrate temporal data and the undefined nature of the anomalies. We demonstrate how to overcome these challenges through well-crafted views implemented in standard software libraries. Our approach involves decomposing the time-dependent knowledge graph into separate time and structure components, as well as providing data-driven guidance for identifying anomalies. These components are then interconnected through extensive interactivity, enabling exploration of anomalies in a complex, temporally evolving network. 
The source code and a demonstration video are publicly available at github.com/MaAllma/Temporal/Knowledge/Graph/Analysis.","accessible_pdf":false,"authors":[{"affiliations":["RPTU in Kaiserslautern, Kaiserslautern, Germany"],"email":"allmann@rhrk.uni-kl.de","is_corresponding":false,"name":"Magdalena Allmann"},{"affiliations":["RPTU in Kaiserslautern, Kaiserslautern, Germany"],"email":"iselborn@rptu.de","is_corresponding":true,"name":"Kevin Iselborn"},{"affiliations":["University of Kaiserslautern-Landau, Kaiserslautern, Germany"],"email":"j_sohns12@cs.uni-kl.de","is_corresponding":false,"name":"Jan-Tobias Sohns"},{"affiliations":["University of Kaiserslautern-Landau, Kaiserslautern, Germany"],"email":"leitte@cs.uni-kl.de","is_corresponding":false,"name":"Heike Leitte"}],"award":"","doi":"","event_id":"a-vast-challenge","event_title":"VAST Challenge","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-vast-challenge-1013","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest2","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"VAST Challenge","session_uid":"a-vast-challenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HhIQktS5HZs&t=2h18m42s","sessions":["VAST Challenge"],"time_stamp":"2024-10-13T12:30:00Z","title":"Visual Anomaly Detection in Temporal Knowledge Graphs","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-vast-challenge-1016","abstract":"This paper presents the comprehensive analysis and visualizations developed by the FES-MC2-1 team for the VAST Challenge 2024, Mini-Challenge 2. The challenge required us to analyze port exit records, transponder ping data, and cargo delivery reports to asso- ciate vessels with their probable cargos, identify seasonal trends and anomalies, and detect illegal fishing activities by SouthSeafood Express Corp vessels. 
Utilizing a combination of advanced visual analytics tools\u2014including Tableau, Python, React, Docker, Postgresql, Nginx and custom-developed solutions from the University of Konstanz\u2014our team uncovered patterns in the data that reveal suspicious activities and significant shifts in fishing behavior following the crackdown on illegal operations.","accessible_pdf":false,"authors":[{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"sinem-bilge.gueler@uni-konstanz.de","is_corresponding":true,"name":"Sinem Bilge Guler"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"mehmet-emre.sahin@uni-konstanz.de","is_corresponding":false,"name":"Mehmet Emre Sahin"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"funda.yildiz-aydin@uni-konstanz.de","is_corresponding":false,"name":"Funda Yildiz-Aydin"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"keim@uni-konstanz.de","is_corresponding":false,"name":"Daniel Keim"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"u.schlegel@uni-konstanz.de","is_corresponding":false,"name":"Udo Schlegel"}],"award":"","doi":"","event_id":"a-vast-challenge","event_title":"VAST Challenge","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-vast-challenge-1016","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest2","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"VAST Challenge","session_uid":"a-vast-challenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VAST Challenge"],"time_stamp":"2024-10-13T12:30:00Z","title":"VAST 2024-MC2 Challenge","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-vast-challenge-1018","abstract":"In this work, we present a visual analytics approach designed to address the 2024 VAST Challenge Mini-Challenge 1, which focuses on detecting bias in a knowledge graph. Our solution utilizes pixel-based visualizations to explore patterns within the knowledge graph, CatchNet, which is employed to identify potential illegal fishing activities. CatchNet is constructed by FishEye analysts who aggregate open-source data, including news articles and public reports. They have recently begun incorporating knowledge extracted from these sources using advanced language models. Our method combines pixel-based visualizations with ordering techniques and sentiment analysis to uncover hidden patterns in both the news articles and the knowledge graph. 
Notably, our analysis reveals that news articles covering critiques and convictions of companies are subject to elevated levels of bias.","accessible_pdf":false,"authors":[{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"raphael.buchmueller@uni-konstanz.de","is_corresponding":false,"name":"Raphael Buchm\u00fcller"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"daniel.fuerst@uni-konstanz.de","is_corresponding":true,"name":"Daniel F\u00fcrst"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"alexander.frings@uni-konstanz.de","is_corresponding":false,"name":"Alexander Frings"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"u.schlegel@uni-konstanz.de","is_corresponding":false,"name":"Udo Schlegel"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"keim@uni-konstanz.de","is_corresponding":false,"name":"Daniel Keim"}],"award":"","doi":"","event_id":"a-vast-challenge","event_title":"VAST Challenge","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-vast-challenge-1018","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest2","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"VAST Challenge","session_uid":"a-vast-challenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HhIQktS5HZs&t=0h29m10s","sessions":["VAST Challenge"],"time_stamp":"2024-10-13T12:30:00Z","title":"UKON-Buchmueller-MC1","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-vast-challenge-1019","abstract":"The SunSpot project is a comprehensive solution to address the 2024 IEEE VAST Challenge MC2, focusing on detecting abnormal vessel activities. Our method integrated data on fishing records, vessel trajectories, commodity-vessel relationships, and fish distributions. We created a set of visualizations to help analysts better understand the characteristics of the area, vessels, and fishing activities. We considered a vessel\u2019s departure from and return to a harbor as a basic cycle of activity and classified these cycles into patterns based on location and dwell time. By visualizing the spatial and temporal aspects of these cycles, we effectively distinguished illegal fishing from normal fishing activities. Our solution highlights the strengths of a multidirectional approach in data analytics, incorporating vessel information, fish origins, exported commodities, and shipping ports.","accessible_pdf":false,"authors":[{"affiliations":["West Lafayette Jr./Sr. 
High School, West Lafayette, United States"],"email":"ashleywqyang@gmail.com","is_corresponding":false,"name":"Ashley Yang"},{"affiliations":["Purdue University, WEST LAFAYETTE, United States"],"email":"wang5329@purdue.edu","is_corresponding":true,"name":"Hao Wang"},{"affiliations":["Northeastern University, Boston, United States"],"email":"yqq1960582321@gmail.com","is_corresponding":false,"name":"Qianlai Yang"},{"affiliations":["Purdue University, West Lafayette, United States"],"email":"yang2767@purdue.edu","is_corresponding":false,"name":"Qi Yang"},{"affiliations":["Purdue University, West Lafayette, United States"],"email":"gong224@purdue.edu","is_corresponding":false,"name":"Ziqian Gong"},{"affiliations":["Purdue University, West Lafayette, United States"],"email":"zhou1471@purdue.edu","is_corresponding":false,"name":"Zizun Zhou"},{"affiliations":["Purdue University, West Lafayette, United States"],"email":"qianz@purdue.edu","is_corresponding":false,"name":"Zhenyu Cheryl Qian"},{"affiliations":["Purdue University, West Lafayette, United States"],"email":"victorchen@purdue.edu","is_corresponding":false,"name":"Yingjie Victor Chen"}],"award":"","doi":"","event_id":"a-vast-challenge","event_title":"VAST Challenge","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-vast-challenge-1019","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest2","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"VAST Challenge","session_uid":"a-vast-challenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HhIQktS5HZs&t=1h29m9s","sessions":["VAST Challenge"],"time_stamp":"2024-10-13T12:30:00Z","title":"Purdue-Chen-MC2","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-vast-challenge-1021","abstract":"In this paper we present an interactive visualization system for solving IEEE VAST Challenge 2024 Mini-Challenge 1. Our system enables interactive exploration and mining of the knowledge graph, assists in identifying suspicious bias and provides corresponding evidence from multiple perspectives. For the convenience of user exploration, our system supports recording the exploration process and preservation of evidence. 
The illustrative case proves the effectiveness of our system.","accessible_pdf":false,"authors":[{"affiliations":["Fudan University, Shanghai, China"],"email":"qiuttt@foxmail.com","is_corresponding":true,"name":"Tian Qiu"},{"affiliations":["Fudan University, Shanghai, China"],"email":"20302010026@fudan.edu.cn","is_corresponding":false,"name":"Yi Shan"},{"affiliations":["Fudan University, Shanghai, China"],"email":"3504936154@qq.com","is_corresponding":false,"name":"Xueli Shu"},{"affiliations":["Fudan University, Shanghai, China"],"email":"philipethanzg@gmail.com","is_corresponding":false,"name":"Aolin Guo"},{"affiliations":["Fudan University, Shanghai, China"],"email":"18812571619@163.com","is_corresponding":false,"name":"Qianhui Li"},{"affiliations":["school of data science, Shanghai , China"],"email":"guomeng200210@163.com","is_corresponding":false,"name":"Meng Guo"},{"affiliations":["Fudan University, Shanghai, China"],"email":"simingchen3@gmail.com","is_corresponding":false,"name":"Siming Chen"}],"award":"","doi":"","event_id":"a-vast-challenge","event_title":"VAST Challenge","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-vast-challenge-1021","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest2","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"VAST Challenge","session_uid":"a-vast-challenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HhIQktS5HZs&t=0h44m42s","sessions":["VAST Challenge"],"time_stamp":"2024-10-13T12:30:00Z","title":"FishEye Watcher: a visual analytics system for knowledge graph bias detection","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-vast-challenge-1023","abstract":"To solve the 2024 VAST Challenge MC3, we use PageRank and different filtering techniques to select nodes or components of interest. We then use TimeArc, a data visualization technique to visualize the evolution of the corporate structure of these nodes and serve as a tool to investigate and confirm this suspicious behavior. We used these techniques to investigate many nodes including the given SouthSeafood Express Corp that was involved in illegal activity. 
We discovered a few key features associated with anomalous nodes such as instances of founding shell companies and large power transfers.","accessible_pdf":false,"authors":[{"affiliations":["Texas Tech University, Lubbock, United States"],"email":"ewei341@gmail.com","is_corresponding":false,"name":"Ethan Wei"},{"affiliations":["Texas Tech Univeristy, Lubbock, United States"],"email":"tnhondan@gmail.com","is_corresponding":false,"name":"Tommy Dang"}],"award":"","doi":"","event_id":"a-vast-challenge","event_title":"VAST Challenge","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-vast-challenge-1023","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest2","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"VAST Challenge","session_uid":"a-vast-challenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VAST Challenge"],"time_stamp":"2024-10-13T12:30:00Z","title":"Prerecorded video (VAST Challenge submission ID 1024)","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-vast-challenge-1028","abstract":"Identifying unreliable sources is crucial for preventing misinformation and making informed decisions. CatchNet, the Oceanus Knowledge Graph, contains biased perspectives that threaten its credibility. We use Large Language Models (LLMs) and interactive visualization systems to identify these biases. By analyzing police reports and using GPT-3.5 to extract information from articles, we establish the ground truth for our analysis. 
Our visual analytics system detects anomalies, revealing unreliable news sources such as The News Buoy and biased analysts such as Harvey Janus and Junior Shurdlu.","accessible_pdf":false,"authors":[{"affiliations":["Funda\u00e7\u00e3o Getulio Vargas, Rio de Janeiro, Brazil","Funda\u00e7\u00e3o Getulio Vargas, Rio de Janeiro, Brazil"],"email":"dany.diaz@ucsp.edu.pe","is_corresponding":true,"name":"Dany Mauro Diaz Espino"},{"affiliations":["FGV, Rio de Janeiro, Brazil","FGV, Rio de Janeiro, Brazil"],"email":"felipe.moreno.vera@gmail.com","is_corresponding":false,"name":"Felipe Moreno-Vera"},{"affiliations":["Getulio Vargas Foundation, Rio de Janeiro, Brazil","Getulio Vargas Foundation, Rio de Janeiro, Brazil"],"email":"jherediaparillo@gmail.com","is_corresponding":false,"name":"Juanpablo Andrew Heredia"},{"affiliations":["Getulio Vargas Foundation, Rio de Janeiro, Brazil","Getulio Vargas Foundation, Rio de Janeiro, Brazil"],"email":"fabricio.venturim@fgv.edu.br","is_corresponding":false,"name":"Fabr\u00edcio Venturim"},{"affiliations":["Get\u00falio Vargas Foundation, Rio de Janeiro, Brazil","Get\u00falio Vargas Foundation, Rio de Janeiro, Brazil"],"email":"jpocom@gmail.com","is_corresponding":false,"name":"Jorge Poco"}],"award":"","doi":"","event_id":"a-vast-challenge","event_title":"VAST Challenge","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-vast-challenge-1028","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest2","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"VAST Challenge","session_uid":"a-vast-challenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HhIQktS5HZs&t=0h8m24s","sessions":["VAST Challenge"],"time_stamp":"2024-10-13T12:30:00Z","title":"FishBiasLens: Integrating Large Language Models and Visual Analytics for Bias Detection","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-vast-challenge-1030","abstract":"This paper presents a visual analytics system designed to address the IEEE VAST Challenge 2024 Mini-Challenge 2. The system can support the matching and anomaly detection of multi-source heterogeneous spatio-temporal data, thereby enabling the detection of illegal transport activities. 
The primary contribution of the system lies in its analysis-driven interaction design.","accessible_pdf":false,"authors":[{"affiliations":["Fudan University, Shanghai, China"],"email":"20302010026@fudan.edu.cn","is_corresponding":true,"name":"Yi Shan"},{"affiliations":["Fudan University, Shanghai, China"],"email":"philipethanzg@gmail.com","is_corresponding":false,"name":"Aolin Guo"},{"affiliations":["Fudan University, Shanghai, China"],"email":"gemini25szk@gmail.com","is_corresponding":false,"name":"Zekai Shao"},{"affiliations":["Fudan University, Shanghai, China"],"email":"qiuttt@foxmail.com","is_corresponding":false,"name":"Tian Qiu"},{"affiliations":["Fudan University, Shanghai, China"],"email":"3504936154@qq.com","is_corresponding":false,"name":"Xueli Shu"},{"affiliations":["Fudan University, Shanghai, China"],"email":"18812571619@163.com","is_corresponding":false,"name":"Qianhui Li"},{"affiliations":["Fudan University, Shanghai, China"],"email":"simingchen3@gmail.com","is_corresponding":false,"name":"Siming Chen"}],"award":"","doi":"","event_id":"a-vast-challenge","event_title":"VAST Challenge","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-vast-challenge-1030","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest2","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"VAST Challenge","session_uid":"a-vast-challenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HhIQktS5HZs&t=1h3m39s","sessions":["VAST Challenge"],"time_stamp":"2024-10-13T12:30:00Z","title":"Visual Analytics for Detecting Illegal Transport Activities","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-scivis-contest-1","abstract":"","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Yiming Shao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Chengming Liu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Zhiyuan Meng"},{"affiliations":"","email":"","is_corresponding":false,"name":"Shufan Qian"},{"affiliations":"","email":"","is_corresponding":false,"name":"Peng Jiang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yunhai Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Dr. 
Qiong Zeng"}],"award":"","doi":"","event_id":"a-scivis-contest","event_title":"SciVis Contest","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-scivis-contest-1","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest3","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"SciVis Contest","session_uid":"a-scivis-contest","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/paQRbfA0tdg&t=0h10m48s","sessions":["SciVis Contest"],"time_stamp":"2024-10-14T12:30:00Z","title":"PlumeViz: Interactive Exploration for Multi-Facet Features of Hydrothermal Plumes in Sonar Images","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-scivis-contest-2","abstract":"","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Ngan V. T. Nguyen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Minh N. A. Tran"},{"affiliations":"","email":"","is_corresponding":false,"name":"Si Chi Hoang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Vuong Tran Thien"},{"affiliations":"","email":"","is_corresponding":false,"name":"Nguyen Tran Nguyen Thanh"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ngo Ly"},{"affiliations":"","email":"","is_corresponding":false,"name":"Phuc Thien Nguyen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Sinh Huy Gip"},{"affiliations":"","email":"","is_corresponding":false,"name":"Sang Thanh Ngo"},{"affiliations":"","email":"","is_corresponding":false,"name":"Nguy\u1ec5n Th\u00e1i H\u00f2a"}],"award":"","doi":"","event_id":"a-scivis-contest","event_title":"SciVis Contest","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-scivis-contest-2","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest3","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"SciVis Contest","session_uid":"a-scivis-contest","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["SciVis Contest"],"time_stamp":"2024-10-14T12:30:00Z","title":"Visualization of Sonar Imaging for Hydrothermal Systems","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-scivis-contest-3","abstract":"","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Adhitya Kamakshidasan"},{"affiliations":"","email":"","is_corresponding":true,"name":"Harikrishnan Pattathil"}],"award":"","doi":"","event_id":"a-scivis-contest","event_title":"SciVis 
Contest","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-scivis-contest-3","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest3","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"SciVis Contest","session_uid":"a-scivis-contest","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/paQRbfA0tdg&t=0h25m7s","sessions":["SciVis Contest"],"time_stamp":"2024-10-14T12:30:00Z","title":"Topology Based Visualization of Hydrothermal Plumes","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1031","abstract":"In soccer, player scouting aims to find players suitable for a team to increase the winning chance in future matches. To scout suitable players, coaches and analysts need to consider whether the players will perform well in a new team, which is hard to learn directly from their historical performances. Match simulation methods have been introduced to scout players by estimating their expected contributions to a new team. However, they usually focus on the simulation of match results and hardly support interactive analysis to navigate potential target players and compare them in fine-grained simulated behaviors. In this work, we propose a visual analytics method to assist soccer player scouting based on match simulation. We construct a two-level match simulation framework for estimating both match results and player behaviors when a player comes to a new team. Based on the framework, we develop a visual analytics system, Team-Scouter, to facilitate the simulative-based soccer player scouting process through player navigation, comparison, and investigation. With our system, coaches and analysts can find potential players suitable for the team and compare them on historical and expected performances. For an in-depth investigation of the players' expected performances, the system provides a visual comparison between the simulated behaviors of the player and the actual ones. 
The usefulness and effectiveness of the system are demonstrated by two case studies on a real-world dataset and an expert interview.","accessible_pdf":false,"authors":[{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"caoanqi28@163.com","is_corresponding":true,"name":"Anqi Cao"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"xxie@zju.edu.cn","is_corresponding":false,"name":"Xiao Xie"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"2366385033@qq.com","is_corresponding":false,"name":"Runjin Zhang"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"1282533692@qq.com","is_corresponding":false,"name":"Yuxin Tian"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"fanmu_032@zju.edu.cn","is_corresponding":false,"name":"Mu Fan"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"zhang_hui@zju.edu.cn","is_corresponding":false,"name":"Hui Zhang"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"ycwu@zju.edu.cn","is_corresponding":false,"name":"Yingcai Wu"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1031","image_caption":"System user interface. The interface contains two views: a navigation view (A) and an investigation view (B). The navigation view consists of a squad board (A1) to navigate players will be replaced and a player ranking list (A2) to compare players by personal information and performances. The investigation view includes an on-ball tactic list (B1) for exploring essential on-ball tactics, a player record list (B2) to compare players' simulated actions under a certain on-ball tactic, and a simulated action map (B3) to display players' detailed simulated actions.","keywords":["Soccer Visualization, Player Scouting, Design Study"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1031/v-full-1031_Preview.mp4?token=lZJeB6Xf0ge_YdGbBTun3RHwlGjYyPFmJbqOYHb7olQ&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1031/v-full-1031_Preview.srt?token=ioob5IkjvKKKvgqxCtdJinqPHLJNypauW9vO7Yg4uxw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Sports. Games, and Finance","session_uid":"v-full","session_youtube_ff_id":"p07D01bK_fs","session_youtube_ff_link":"https://youtu.be/p07D01bK_fs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/SOrXiceBb2g&t=0h0m6s","sessions":["Applications: Sports. Games, and Finance"],"time_stamp":"2024-10-17T14:15:00Z","title":"Team-Scouter: Simulative Visual Analytics of Soccer Player Scouting","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1099","abstract":"Tactics play an important role in team sports by guiding how players interact on the field. Both sports fans and experts have a demand for analyzing sports tactics. Existing approaches allow users to visually perceive the multivariate tactical effects. 
However, these approaches require users to experience a complex reasoning process to connect the multiple interactions within each tactic to the final tactical effect. In this work, we collaborate with basketball experts and propose a progressive approach to help users gain a deeper understanding of how each tactic works and customize tactics on demand. Users can progressively sketch on a tactic board, and a coach agent will simulate the possible actions in each step and present the simulation to users with facet visualizations. We develop an extensible framework that integrates large language models (LLMs) and visualizations to help users communicate with the coach agent with multimodal inputs. Based on the framework, we design and develop Smartboard, an agent-based interactive visualization system for fine-grained tactical analysis, especially for play design. Smartboard provides users with a structured process of setup, simulation, and evolution, allowing for iterative exploration of tactics based on specific personalized scenarios. We conduct case studies based on real-world basketball datasets to demonstrate the effectiveness and usefulness of our system.","accessible_pdf":false,"authors":[{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"ziao_liu@outlook.com","is_corresponding":true,"name":"Ziao Liu"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"xxie@zju.edu.cn","is_corresponding":false,"name":"Xiao Xie"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"3170101799@zju.edu.cn","is_corresponding":false,"name":"Moqi He"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"zhao_ws@zju.edu.cn","is_corresponding":false,"name":"Wenshuo Zhao"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"wuyihong0606@gmail.com","is_corresponding":false,"name":"Yihong Wu"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"lycheecheng@zju.edu.cn","is_corresponding":false,"name":"Liqi Cheng"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"zhang_hui@zju.edu.cn","is_corresponding":false,"name":"Hui Zhang"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"ycwu@zju.edu.cn","is_corresponding":false,"name":"Yingcai Wu"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1099","image_caption":"The system interface of Smartboard. (A) The chat view provides system feedback and enhances communication between users and the system through tag selections and open-question answering. (B) The setup view provides interactions during tactical setup with tactics sketching, matchup analysis, and situation retrieval. (C) The simulation view presents the coach agent's recommended tactics, along with explanations and evaluations in both overview and detail. 
(D) The history view records users' tactics and provides the classic tactics for starting exploration.","keywords":["Sports visualization, tactic board, tactical analysis"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1099/v-full-1099_Preview.mp4?token=EIieXd4BF-80sryrgj0510wUCzzsyFdEgrbI1J5DqPY&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Sports. Games, and Finance","session_uid":"v-full","session_youtube_ff_id":"LQ89KZHc_uY","session_youtube_ff_link":"https://youtu.be/LQ89KZHc_uY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/SOrXiceBb2g&t=0h25m16s","sessions":["Applications: Sports. Games, and Finance"],"time_stamp":"2024-10-17T14:39:00Z","title":"Smartboard: Visual Exploration of Team Tactics with LLM Agent","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1351","abstract":"As basketball\u2019s popularity surges, fans often find themselves confused and overwhelmed by the rapid game pace and complexity. Basketball tactics, involving a complex series of actions, require substantial knowledge to be fully understood. This complexity leads to a need for additional information and explanation, which can distract fans from the game. To tackle these challenges, we present Sportify, a Visual Question Answering system that integrates narratives and embedded visualization for demystifying basketball tactical questions, aiding fans in understanding various game aspects. We propose three novel action visualizations (i.e., Pass, Cut, and Screen) to demonstrate critical action sequences. To explain the reasoning and logic behind players\u2019 actions, we leverage a large-language model (LLM) to generate narratives. We adopt a storytelling approach for complex scenarios from both first and third-person perspectives, integrating action visualizations. We evaluated Sportify with basketball fans to investigate its impact on understanding of tactics, and how different personal perspectives of narratives impact the understanding of complex tactic with action visualizations. Our evaluation with basketball fans demonstrates Sportify\u2019s capability to deepen tactical insights and amplify the viewing experience. 
Furthermore, third-person narration assists people in getting in-depth game explanations while first-person narration enhances fans\u2019 game engagement.","accessible_pdf":true,"authors":[{"affiliations":["Harvard University, Allston, United States"],"email":"chungyi347@gmail.com","is_corresponding":true,"name":"Chunggi Lee"},{"affiliations":["Harvard University, Cambridge, United States"],"email":"mlin@g.harvard.edu","is_corresponding":false,"name":"Tica Lin"},{"affiliations":["Harvard University, Cambridge, United States"],"email":"pfister@seas.harvard.edu","is_corresponding":false,"name":"Hanspeter Pfister"},{"affiliations":["University of Minnesota-Twin Cities, Minneapolis, United States"],"email":"ztchen@umn.edu","is_corresponding":false,"name":"Chen Zhu-Tian"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1351","image_caption":"Sportify explains tactic questions in each clip for everyone, aiming to engage users and foster a love for sports. We integrate embedded visualization and personified narratives generated by large language model (LLM) to elucidate a complex series of actions through action detection, tactic classifier, and LLM pipelines.","keywords":["Embedded Visualization, Narrative and storytelling, Basketball tactic, Question-answering (QA) system"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1351/v-full-1351_Preview.mp4?token=bIGHDjLkrgJQpolXS_FEYprMGOPhEwvYQLjlBgRYK6Y&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1351/v-full-1351_Preview.srt?token=L7wsQyu7e4q3byPVBl_dcO_CathD-9_Gwi7yGt8AKvk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Sports. Games, and Finance","session_uid":"v-full","session_youtube_ff_id":"IZil979U9UQ","session_youtube_ff_link":"https://youtu.be/IZil979U9UQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/SOrXiceBb2g&t=0h12m34s","sessions":["Applications: Sports. Games, and Finance"],"time_stamp":"2024-10-17T14:27:00Z","title":"Sportify: Question Answering with Embedded Visualizations and Personified Narratives for Sports Video","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1571","abstract":"Effective security patrol management is critical for ensuring safety in diverse environments such as art galleries, airports, and factories. The behavior of patrols in these situations can be modeled by patrolling games. They simulate the behavior of the patrol and adversary in the building, which is modeled as a graph of interconnected nodes representing rooms. The designers of algorithms solving the game face the problem of analyzing complex graph layouts with temporal dependencies. Therefore, appropriate visual support is crucial for them to work effectively. In this paper, we present a novel tool that helps the designers of patrolling games explore the outcomes of the proposed algorithms and approaches, evaluate their success rate, and propose modifications that can improve their solutions. 
Our tool offers an intuitive and interactive interface, featuring a detailed exploration of patrol routes and probabilities of taking them, simulation of patrols, and other requested features. In close collaboration with experts in designing patrolling games, we conducted three case studies demonstrating the usage and usefulness of our tool. The prototype of the tool, along with exemplary datasets, is available at https://gitlab.fi.muni.cz/formela/strategy-vizualizer.","accessible_pdf":false,"authors":[{"affiliations":["Masaryk University, Brno, Czech Republic"],"email":"langm@mail.muni.cz","is_corresponding":true,"name":"Mat\u011bj Lang"},{"affiliations":["Masaryk University, Brno, Czech Republic"],"email":"469242@mail.muni.cz","is_corresponding":false,"name":"Adam \u0160t\u011bp\u00e1nek"},{"affiliations":["Faculty of Informatics, Masaryk University, Brno, Czech Republic"],"email":"514179@mail.muni.cz","is_corresponding":false,"name":"R\u00f3bert Zvara"},{"affiliations":["Faculty of Informatics, Masaryk University, Brno, Czech Republic"],"email":"rehak@fi.muni.cz","is_corresponding":false,"name":"Vojt\u011bch \u0158eh\u00e1k"},{"affiliations":["Masaryk University, Brno, Czech Republic"],"email":"kozlikova@fi.muni.cz","is_corresponding":false,"name":"Barbora Kozlikova"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1571","image_caption":"The screen of the visualization tool, featuring a Markov chain representing a patroller's strategy. On the left, there is a transition matrix providing an alternative view of the Markov chain. On the right, there is a bar chart showing the probability distribution in time of the patroller's presence.","keywords":["Patrolling Games, Strategy, Graph, Heatmap, Visual Analysis"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1571/v-full-1571_Preview.mp4?token=f8B5YPqvr32ERZQcdM_iNJ8vQlq0Lo2mxVfog7Id2s8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1571/v-full-1571_Preview.srt?token=3NQFm8yUvCvIu1-6NfSH5ehXGG9kmvr4XDrckdLdxoM&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Sports. Games, and Finance","session_uid":"v-full","session_youtube_ff_id":"BgFsC5T5ILM","session_youtube_ff_link":"https://youtu.be/BgFsC5T5ILM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/SOrXiceBb2g&t=1h2m25s","sessions":["Applications: Sports. Games, and Finance"],"time_stamp":"2024-10-17T15:15:00Z","title":"Who Let the Guards Out: Visual Support for Patrolling Games","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243394745","abstract":"The fund investment industry heavily relies on the expertise of fund managers, who bear the responsibility of managing portfolios on behalf of clients. With their investment knowledge and professional skills, fund managers gain a competitive advantage over the average investor in the market. 
Consequently, investors prefer entrusting their investments to fund managers rather than directly investing in funds. For these investors, the primary concern is selecting a suitable fund manager. While previous studies have employed quantitative or qualitative methods to analyze various aspects of fund managers, such as performance metrics, personal characteristics, and performance persistence, they often face challenges when dealing with a large candidate space. Moreover, distinguishing whether a fund manager's performance stems from skill or luck poses a challenge, making it difficult to align with investors' preferences in the selection process. To address these challenges, this study characterizes the requirements of investors in selecting suitable fund managers and proposes an interactive visual analytics system called FMLens. This system streamlines the fund manager selection process, allowing investors to efficiently assess and deconstruct fund managers' investment styles and abilities across multiple dimensions. Additionally, the system empowers investors to scrutinize and compare fund managers' performances. The effectiveness of the approach is demonstrated through two case studies and a qualitative user study. Feedback from domain experts indicates that the system excels in analyzing fund managers from diverse perspectives, enhancing the efficiency of fund manager evaluation and selection.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Longfei Chen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Chen Cheng"},{"affiliations":"","email":"","is_corresponding":false,"name":"He Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Xiyuan Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yun Tian"},{"affiliations":"","email":"","is_corresponding":false,"name":"Xuanwu Yue"},{"affiliations":"","email":"","is_corresponding":false,"name":"Wong Kam-Kwai"},{"affiliations":"","email":"","is_corresponding":false,"name":"Haipeng Zhang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Suting Hong"},{"affiliations":"","email":"","is_corresponding":false,"name":"Quan Li"}],"award":"","doi":"10.1109/TVCG.2024.3394745","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243394745","image_caption":"FMLens consists of four views: (A) The FM Overview serves as a summary of the fund manager candidate space. (B) The Ranking View facilitates the examination of fund managers' performance evolution and supports interactive ranking. (C) The Historical Management View provides a comprehensive review of fund managers' management records. 
(D) The Comparison View is crafted to facilitate the comparison of fund performance among one or more fund managers.","keywords":["Financial Data, Fund Manager Selection, Visual Analytics"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243394745/v-tvcg-20243394745_Preview.mp4?token=S_DhSMZGe1pa7MrGSlMaB6fb6AOeVwyJ8Vq6x2taTpw&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243394745/v-tvcg-20243394745_Preview.srt?token=5bL8lQRkexpMVVbZFfcuYdKEFLNpR__BUaDOBP2BXqY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Sports. Games, and Finance","session_uid":"v-tvcg","session_youtube_ff_id":"AK2XOfpvC6o","session_youtube_ff_link":"https://youtu.be/AK2XOfpvC6o","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/SOrXiceBb2g&t=0h37m42s","sessions":["Applications: Sports. Games, and Finance"],"time_stamp":"2024-10-17T14:51:00Z","title":"FMLens: Towards Better Scaffolding the Process of Fund Manager Selection in Fund Investments","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243402834","abstract":"Impact dynamics are crucial for estimating the growth patterns of NFT projects by tracking the diffusion and decay of their relative appeal among stakeholders. Machine learning methods for impact dynamics analysis are incomprehensible and rigid in terms of their interpretability and transparency, whilst stakeholders require interactive tools for informed decision-making. Nevertheless, developing such a tool is challenging due to the substantial, heterogeneous NFT transaction data and the requirements for flexible, customized interactions. To this end, we integrate intuitive visualizations to unveil the impact dynamics of NFT projects. We first conduct a formative study and summarize analysis criteria, including substitution mechanisms, impact attributes, and design requirements from stakeholders. Next, we propose the Minimal Substitution Model to simulate substitutive systems of NFT projects that can be feasibly represented as node-link graphs. Particularly, we utilize attribute-aware techniques to embed the project status and stakeholder behaviors in the layout design. Accordingly, we develop a multi-view visual analytics system, namely NFTracer, allowing interactive analysis of impact dynamics in NFT transactions. We demonstrate the informativeness, effectiveness, and usability of NFTracer by performing two case studies with domain experts and one user study with stakeholders. The studies suggest that NFT projects featuring a higher degree of similarity are more likely to substitute each other.
The impact of NFT projects within substitutive systems is contingent upon the degree of stakeholders\u2019 influx and projects\u2019 freshness.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Yifan Cao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Qing Shi"},{"affiliations":"","email":"","is_corresponding":false,"name":"Lucas Shen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Kani Chen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yang Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Wei Zeng"},{"affiliations":"","email":"","is_corresponding":false,"name":"Huamin Qu"}],"award":"","doi":"10.1109/TVCG.2024.3402834","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243402834","image_caption":"**Figure 1:** Understanding the evolving appeal of NFT projects requires analyzing impact dynamics. NFTracer tackles this challenge with a multi-view visual analytics system, addressing limitations of existing machine learning methods. The interface offers four distinct views: (A) Propensity Analysis, (B) Mechanisms Analysis, (C) Substitution View, and (D) Impact Dynamic View. This example visualizes the multifaceted stakeholder flow (MSF) between CryptoPunks and Cool Cats, revealing co-occurring stakeholders (D1-3) and the temporal evolution of their impact dynamics (D4) through NFTracer's analytical capabilities.","keywords":["Stakeholders, Nonfungible Tokens, Social Networking Online, Visual Analytics, Network Analyzers, Measurement, Layout, Impact Dynamics Analysis, Non Fungible Tokens NF Ts, NFT Transaction Data, Substitutive Systems, Visual Analytics"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://doi.org/10.48550/arXiv.2409.15754","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243402834/v-tvcg-20243402834_Preview.mp4?token=p2neHggXdErinSRXo2Xkd7ZOVpBAATdOPM_U9-LNycc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243402834/v-tvcg-20243402834_Preview.srt?token=DfO05-24gws9c8vL_kEeZJVzz3xWDOx0Nz9TyRq-lug&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Sports. Games, and Finance","session_uid":"v-tvcg","session_youtube_ff_id":"00yRDSY-1Kk","session_youtube_ff_link":"https://youtu.be/00yRDSY-1Kk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/SOrXiceBb2g&t=0h50m58s","sessions":["Applications: Sports. Games, and Finance"],"time_stamp":"2024-10-17T15:03:00Z","title":"Tracing NFT Impact Dynamics in Transaction-flow Substitutive Systems with Visual Analytics","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1290","abstract":"Visualization linters are end-user facing evaluators that automatically identify potential chart issues. These spell-checker like systems offer a blend of interpretability and customization that is not found in other forms of automated assistance. 
However, existing linters do not model context and have primarily targeted users who do not need assistance, resulting in obvious---even annoying---advice. We investigate these issues within the domain of color palette design, which serves as a microcosm of visualization design concerns. We contribute a GUI-based color palette linter as a design probe that covers perception, accessibility, context, and other design criteria, and use it to explore visual explanations, integrated fixes, and user defined linting rules. Through a formative interview study and theory-driven analysis, we find that linters can be meaningfully integrated into graphical contexts, thereby addressing many of their core issues. We discuss implications for integrating linters into visualization tools, developing improved assertion languages, and supporting end-user tunable advice---all laying the groundwork for more effective visualization linters in any context.","accessible_pdf":true,"authors":[{"affiliations":["University of Washington, Seattle, United States","University of Utah, Salt Lake City, United States"],"email":"mcnutt.andrew@gmail.com","is_corresponding":true,"name":"Andrew M McNutt"},{"affiliations":["University of Washington, Seattle, United States"],"email":"maureen.stone@gmail.com","is_corresponding":false,"name":"Maureen Stone"},{"affiliations":["University of Washington, Seattle, United States"],"email":"jheer@uw.edu","is_corresponding":false,"name":"Jeffrey Heer"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1290","image_caption":"How do you know when what you\u2019ve done is right? Visualization linters provide concrete feedback about chart designs, but so far they have had interface issues that have limited their usefulness.
This work introduces a linter (PaletteLint) for color palettes (and a GUI called Color Buddy, pictured here) that explores ways to deal with these issues.","keywords":["Linters, Color Palette Design, Design Probe, Reflection"],"open_access_supplemental_link":"https://osf.io/geauf","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.21285","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1290/v-full-1290_Preview.mp4?token=NkJBbD0Gt1xNNy93YGu-u2bvj6CkT8d0wcJdXjEMsHY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1290/v-full-1290_Preview.srt?token=mgzxzUfCmwcweO_V8-7UDRSbREJDumR1vwvE7SA5do0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full10","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Designing Palettes and Encodings","session_uid":"v-full","session_youtube_ff_id":"CY7ycxWmLkw","session_youtube_ff_link":"https://youtu.be/CY7ycxWmLkw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/yBF6qqK_ASs&t=0h12m6s","sessions":["Designing Palettes and Encodings"],"time_stamp":"2024-10-16T17:57:00Z","title":"Mixing Linters with GUIs: A Color Palette Design Probe","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1595","abstract":"Assigning discriminable and harmonic colors to samples according to their class labels and spatial distribution can generate attractive visualizations and facilitate data exploration. However, as the number of classes increases, it is challenging to generate a high-quality color assignment result that accommodates all classes simultaneously. A practical solution is to organize classes into a hierarchy and then dynamically assign colors during exploration. However, existing color assignment methods fall short in generating high-quality color assignment results and dynamically aligning them with hierarchical structures. To address this issue, we develop a dynamic color assignment method for hierarchical data, which is formulated as a multi-objective optimization problem. This method simultaneously considers color discriminability, color harmony, and spatial distribution at each hierarchical level. By using the colors of parent classes to guide the color assignment of their child classes, our method further promotes both consistency and clarity across hierarchical levels. 
We demonstrate the effectiveness of our method in generating dynamic color assignment results with quantitative experiments and a user study.","accessible_pdf":false,"authors":[{"affiliations":["Tsinghua University, Beijing, China"],"email":"jiashu0717c@gmail.com","is_corresponding":false,"name":"Jiashu Chen"},{"affiliations":["Tsinghua University, Beijing, China"],"email":"vicayang496@gmail.com","is_corresponding":true,"name":"Weikai Yang"},{"affiliations":["Tsinghua University, Beijing, China"],"email":"jiazl22@mails.tsinghua.edu.cn","is_corresponding":false,"name":"Zelin Jia"},{"affiliations":["Tsinghua University, Beijing, China"],"email":"tarolancy@gmail.com","is_corresponding":false,"name":"Lanxi Xiao"},{"affiliations":["Tsinghua University, Beijing, China"],"email":"shixia@tsinghua.edu.cn","is_corresponding":false,"name":"Shixia Liu"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1595","image_caption":"Based on user exploration, our method dynamically selects the color range and assigns colors to classes within the range, which ensures high discriminability and harmony at each level and maintains consistency across different levels.","keywords":["Color assignment, Hierarchical Visualization, Discriminability, Harmony."],"open_access_supplemental_link":"https://osf.io/e4b5u/?view_only=68cc67c194c443b498bd2545ef551faa","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.14742","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1595/v-full-1595_Preview.mp4?token=toj6GxPoOhr1jLn6j9nD71SzjSzQTxHw6k3YcrcXjAo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1595/v-full-1595_Preview.srt?token=jIle7eUQz-qE5I07DktS3QVaM3zuo11YTABZfXIlcjc&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full10","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Designing Palettes and Encodings","session_uid":"v-full","session_youtube_ff_id":"RjtAd4XmMsU","session_youtube_ff_link":"https://youtu.be/RjtAd4XmMsU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/yBF6qqK_ASs&t=0h24m59s","sessions":["Designing Palettes and Encodings"],"time_stamp":"2024-10-16T18:09:00Z","title":"Dynamic Color Assignment for Hierarchical Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1836","abstract":"Shape is commonly used to distinguish between categories in multi-class scatterplots. However, existing guidelines for choosing effective shape palettes rely largely on intuition and do not consider how these needs may change as the number of categories increases. Unlike color, shapes can not be represented by a numerical space, making it difficult to propose general guidelines or design heuristics for using shape effectively. This paper presents a series of four experiments evaluating the efficiency of 39 shapes across three tasks: relative mean judgment tasks, expert preference, and correlation estimation. Our results show that conventional means for reasoning about shapes, such as filled versus unfilled, are insufficient to inform effective palette design. 
Further, even expert palettes vary significantly in their use of shape and corresponding effectiveness. To support effective shape palette design, we developed a model based on pairwise relations between shapes in our experiments and the number of shapes required for a given design. We embed this model in a palette design tool to give designers agency over shape selection while incorporating empirical elements of perceptual performance captured in our study. Our model advances understanding of shape perception in visualization contexts and provides practical design guidelines that can help improve categorical data encodings. ","accessible_pdf":false,"authors":[{"affiliations":["University of North Carolina-Chapel Hill, Chapel Hill, United States"],"email":"chint@cs.unc.edu","is_corresponding":true,"name":"Chin Tseng"},{"affiliations":["University of North Carolina-Chapel Hill, Chapel Hill, United States"],"email":"zeyuwang@cs.unc.edu","is_corresponding":false,"name":"Arran Zeyu Wang"},{"affiliations":["University of Oklahoma, Norman, United States"],"email":"quadri@ou.edu","is_corresponding":false,"name":"Ghulam Jilani Quadri"},{"affiliations":["University of North Carolina-Chapel Hill, Chapel Hill, United States"],"email":"danielle.szafir@cs.unc.edu","is_corresponding":false,"name":"Danielle Albers Szafir"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1836","image_caption":"We present a web-based shape recommendation tool based on our empirical studies. Users can input their target category number and preferred shape, and the tool will provide a shape palette based on a pairwise distance model between shapes generated using our experimental results. The output shape palette can also be modified by swapping out certain shapes, which the system will replace using data-driven recommendations. 
","keywords":["Categorical perception, shape perception, multiclass scatterplots, visualization effectiveness, quantitative study"],"open_access_supplemental_link":"https://osf.io/5k47c/?view_only=52e6b52f69b84ceab8c8c1b897083fc3","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1836/v-full-1836_Preview.mp4?token=xxn-m06rWMTDUsTGJC38ewDLCvtsHQOwHMkSA2HSmDA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1836/v-full-1836_Preview.srt?token=rAB3h0TDWwRdw_o0wSQ1-fHqwgOwkRj6JvdCsF3r78Q&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full10","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Designing Palettes and Encodings","session_uid":"v-full","session_youtube_ff_id":"SSB0MEkju-s","session_youtube_ff_link":"https://youtu.be/SSB0MEkju-s","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/yBF6qqK_ASs&t=0h36m40s","sessions":["Designing Palettes and Encodings"],"time_stamp":"2024-10-16T18:21:00Z","title":"An Empirically Grounded Approach for Designing Shape Palettes","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233275925","abstract":"A contiguous area cartogram is a geographic map in which the area of each region is proportional to numerical data (e.g., population size) while keeping neighboring regions connected. In this study, we investigated whether value-to-area legends (square symbols next to the values represented by the squares' areas) and grid lines aid map readers in making better area judgments. We conducted an experiment to determine the accuracy, speed, and confidence with which readers infer numerical data values for the mapped regions. We found that, when only informed about the total numerical value represented by the whole cartogram without any legend, the distribution of estimates for individual regions was centered near the true value with substantial spread. Legends with grid lines significantly reduced the spread but led to a tendency to underestimate the values. Comparing differences between regions or between cartograms revealed that legends and grid lines slowed the estimation without improving accuracy. However, participants were more likely to complete the tasks when legends and grid lines were present, particularly when the area units represented by these features could be interactively selected. We recommend considering the cartogram's use case and purpose before deciding whether to include grid lines or an interactive legend.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Kelvin L. T. Fung"},{"affiliations":"","email":"","is_corresponding":false,"name":"Simon T. Perrault"},{"affiliations":"","email":"","is_corresponding":false,"name":"Michael T. Gastner"}],"award":"","doi":"10.1109/TVCG.2023.3275925","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233275925","image_caption":"Area cartograms resize regions based on data like population or GDP. Our user study evaluated whether legends and grid lines help readers estimate these values accurately. 
We found that legends and grid lines improve consistency and task completion but slow down estimation. Our findings suggest practical consideration of these features in cartogram design.","keywords":["Task Analysis, Symbols, Data Visualization, Sociology, Visualization, Switches, Mice, Cartogram, Geovisualization, Interactive Data Exploration, Quantitative Evaluation"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233275925/v-tvcg-20233275925_Preview.mp4?token=BulF51oG-kLnA-qi6h8E5DjKPUTmmjELljvCv1OAGGA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233275925/v-tvcg-20233275925_Preview.srt?token=j5V-AYjzUCUA03WOMhzvlbCUQ42tOZkPPmqHSHwrGK8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full10","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Designing Palettes and Encodings","session_uid":"v-tvcg","session_youtube_ff_id":"lDlvZRQYPwU","session_youtube_ff_link":"https://youtu.be/lDlvZRQYPwU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/yBF6qqK_ASs&t=0h50m10s","sessions":["Designing Palettes and Encodings"],"time_stamp":"2024-10-16T18:33:00Z","title":"Effectiveness of Area-to-Value Legends and Grid Lines in Contiguous Area Cartograms","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233289292","abstract":"Reading a visualization is like reading a paragraph. Each sentence is a comparison: the mean of these is higher than those; this difference is smaller than that. What determines which comparisons are made first? The viewer's goals and expertise matter, but the way that values are visually grouped together within the chart also impacts those comparisons. Research from psychology suggests that comparisons involve multiple steps. First, the viewer divides the visualization into a set of units. This might include a single bar or a grouped set of bars. Then the viewer selects and compares two of these units, perhaps noting that one pair of bars is longer than another. Viewers might take an additional third step and perform a second-order comparison, perhaps determining that the difference between one pair of bars is greater than the difference between another pair. We create a visual comparison taxonomy that allows us to develop and test a sequence of hypotheses about which comparisons people are more likely to make when reading a visualization. We find that people tend to compare two groups before comparing two individual bars and that second-order comparisons are rare. Visual cues like spatial proximity and color can influence which elements are grouped together and selected for comparison, with spatial proximity being a stronger grouping cue. 
Interestingly, once the viewer grouped together and compared a set of bars, regardless of whether the group is formed by spatial proximity or color similarity, they no longer consider other possible groupings in their comparisons.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Cindy Xiong Bearfield"},{"affiliations":"","email":"","is_corresponding":false,"name":"Chase Stokes"},{"affiliations":"","email":"","is_corresponding":false,"name":"Andrew Lovett"},{"affiliations":"","email":"","is_corresponding":false,"name":"Steven Franconeri"}],"award":"","doi":"10.1109/TVCG.2023.3289292","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233289292","image_caption":"When designing simple bar charts depicting the revenue of two companies A and B in two regions East and West, one can group the bars spatially by company such that West A and East A are closer together, and West B and East B are close together. One can also add color to the bars, such as coloring the two A bars the same color, and the two B bars the same color. We compared the spatial proximity cue against the color cue, and found people to prioritize the spatial proximity cue when making comparisons. That is, they are more likely to group bars that are next to each other, even if they have different colors, to be compared to bars further away. They are less likely to group bars that further away from each other even if they have the same color.","keywords":["comparison, perception, visual grouping, bar charts, verbal conclusions."],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233289292/v-tvcg-20233289292_Preview.mp4?token=FioY-BPSr_2MbWrgkD2XLq8GLNsMALBaddZ05hvtkfI&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full10","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Designing Palettes and Encodings","session_uid":"v-tvcg","session_youtube_ff_id":"khn38dy2CQk","session_youtube_ff_link":"https://youtu.be/khn38dy2CQk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/yBF6qqK_ASs&t=1h0m57s","sessions":["Designing Palettes and Encodings"],"time_stamp":"2024-10-16T18:45:00Z","title":"What Does the Chart Say? Grouping Cues Guide Viewer Comparisons and Conclusions in Bar Charts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233322372","abstract":"Visualization linting is a proven effective tool in assisting users to follow established visualization guidelines. Despite its success, visualization linting for choropleth maps, one of the most popular visualizations on the internet, has yet to be investigated. In this paper, we present GeoLinter, a linting framework for choropleth maps that assists in creating accurate and robust maps. Based on a set of design guidelines and metrics drawing upon a collection of best practices from the cartographic literature, GeoLinter detects potentially suboptimal design decisions and provides further recommendations on design improvement with explanations at each step of the design process. 
We perform a validation study to evaluate the proposed framework's functionality with respect to identifying and fixing errors and apply its results to improve the robustness of GeoLinter. Finally, we demonstrate the effectiveness of the GeoLinter - validated through empirical studies - by applying it to a series of case studies using real-world datasets.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Fan Lei"},{"affiliations":"","email":"","is_corresponding":false,"name":"Arlen Fan"},{"affiliations":"","email":"","is_corresponding":false,"name":"Alan M. MacEachren"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ross Maciejewski"}],"award":"","doi":"10.1109/TVCG.2023.3322372","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233322372","image_caption":"The GeoLinter Interface: (A) the VegaLite code editor; (B) the original map; (C) the map after applying soft fixes; (D) classification recommendations; (E) detected violations with guides on map improvements, and; (F) the status panel. A choropleth map showing the value per capita of freight shipments in the U.S. by state 2002. In the original choropleth map design (B), the data classification accuracy is lower than the average value; the colors between bins are nearly indistinguishable; the map data has not been normalized and the data units are missing. After applying the suggested fixes from GeoLinter, the designer produces (C).","keywords":["Data visualization , Image color analysis , Geology , Recommender systems , Guidelines , Bars , Visualization Author Keywords: Automated visualization design , choropleth maps , visualization linting , visualization recommendation"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2310.13707","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233322372/v-tvcg-20233322372_Preview.mp4?token=-k42yJtftMEU_gUi7HIlQR2mfoAwnh4Gh3wzAVhT740&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233322372/v-tvcg-20233322372_Preview.srt?token=JHK35i5tO62M4yeLJ_2PfyJgF0mJSTX6uxdiwQBk3Iw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full10","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Designing Palettes and Encodings","session_uid":"v-tvcg","session_youtube_ff_id":"p6_Sf3E7KPI","session_youtube_ff_link":"https://youtu.be/p6_Sf3E7KPI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/yBF6qqK_ASs&t=0h0m55s","sessions":["Designing Palettes and Encodings"],"time_stamp":"2024-10-16T17:45:00Z","title":"GeoLinter: A Linting Framework for Choropleth Maps","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1060","abstract":"There is increased interest in understanding the interplay between text and visuals in the field of data visualization. However, this attention has predominantly been on the use of text in standalone visualizations (such as text annotation overlays) or augmenting text stories supported by a series of independent views. 
In this paper, we shift from the traditional focus on single-chart annotations to characterize the nuanced but crucial communication role of text in the complex environment of interactive dashboards. Through a survey and analysis of 190 dashboards in the wild, plus 13 expert interview sessions with experienced dashboard authors, we highlight the distinctive nature of text as an integral component of the dashboard experience, while delving into the categories, semantic levels, and functional roles of text, and exploring how these text elements are coalesced by dashboard authors to guide and inform dashboard users. Our contributions are threefold. First, we distill qualitative and quantitative findings from our studies to characterize current practices of text use in dashboards, including a categorization of text-based components and design patterns. Second, we leverage current practices and existing literature to propose, discuss, and validate recommended practices for text in dashboards, embodied as a set of 12 heuristics that underscore the semantic and functional role of text in offering navigational cues, contextualizing data insights, supporting reading order, among other concerns. Third, we reflect on our findings to identify gaps and propose opportunities for data visualization researchers to push the boundaries on text usage for dashboards, from authoring support and interactivity to text generation and content personalization. Our research underscores the significance of elevating text as a first-class citizen in data visualization, and the need to support the inclusion of textual components and their interactive affordances in dashboard design.","accessible_pdf":true,"authors":[{"affiliations":["Tableau Research, Seattle, United States"],"email":"nicole.sultanum@gmail.com","is_corresponding":true,"name":"Nicole Sultanum"},{"affiliations":["Tableau Research, Palo Alto, United States"],"email":"vsetlur@tableau.com","is_corresponding":false,"name":"Vidya Setlur"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1060","image_caption":"Our work seeks to elevate text as a first-class citizen in dashboards. 
From a survey and analysis of 190 dashboards and interview feedback from 13 experts, we (a) highlight current dashboard text practices, (b) propose and validate recommended practices as a set of 12 heuristics for dashboard text, and (c) outline opportunities for future research to take dashboard text to the next level.","keywords":["Text, dashboards, semantic levels, metadata, interactivity, instruction, description, takeaways, conversational heuristics"],"open_access_supplemental_link":"https://osf.io/49zp5/?view_only=cafb29af267d4b50a379050695c39712","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.14451","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1060/v-full-1060_Preview.mp4?token=piuK0GmI2L8BDBH612TjHAs-Q5a4a9J1XrqK1pw0boM&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1060/v-full-1060_Preview.srt?token=pP4Qpqztv-Lw1dRZpbJq792fxgCsh957Fu9fquq0v9A&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full11","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Text, Annotation, and Metaphor","session_uid":"v-full","session_youtube_ff_id":"OZmdwGmz1BI","session_youtube_ff_link":"https://youtu.be/OZmdwGmz1BI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/GmSnZQ8onkA&t=0h37m51s","sessions":["Text, Annotation, and Metaphor"],"time_stamp":"2024-10-16T13:06:00Z","title":"From Instruction to Insight: Exploring the Semantic and Functional Roles of Text in Interactive Dashboards","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1295","abstract":"Annotations play a vital role in highlighting critical aspects of visualizations, aiding in data externalization and exploration, collaborative sensemaking, and visual storytelling. However, despite their widespread use, we identified a lack of a design space for common practices for annotations. In this paper, we evaluated over 1,800 static annotated charts to understand how people annotate visualizations in practice. Through qualitative coding of these diverse real-world annotated charts, we explored three primary aspects of annotation usage patterns: analytic purposes for chart annotations (e.g., present, identify, summarize, or compare data features), mechanisms for chart annotations (e.g., types and combinations of annotations used, frequency of different annotation types across chart types, etc.), and the data source used to generate the annotations. We then synthesized our findings into a design space of annotations, highlighting key design choices for chart annotations. We presented three case studies illustrating our design space as a practical framework for chart annotations to enhance the communication of visualization insights. 
All supplemental materials are available at \\url{https://shorturl.at/bAGM1}.","accessible_pdf":false,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States","University of Utah, Salt Lake City, United States"],"email":"dilshadur@sci.utah.edu","is_corresponding":true,"name":"Md Dilshadur Rahman"},{"affiliations":["University of Oklahoma, Norman, United States","University of Oklahoma, Norman, United States"],"email":"quadri@ou.edu","is_corresponding":false,"name":"Ghulam Jilani Quadri"},{"affiliations":["University of South Florida , Tampa, United States","University of South Florida , Tampa, United States"],"email":"bdoppalapudi@usf.edu","is_corresponding":false,"name":"Bhavana Doppalapudi"},{"affiliations":["University of North Carolina-Chapel Hill, Chapel Hill, United States","University of North Carolina-Chapel Hill, Chapel Hill, United States"],"email":"danielle.szafir@cs.unc.edu","is_corresponding":false,"name":"Danielle Albers Szafir"},{"affiliations":["University of Utah, Salt Lake City, United States","University of Utah, Salt Lake City, United States"],"email":"paul.rosen@utah.edu","is_corresponding":false,"name":"Paul Rosen"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1295","image_caption":"A line chart from The Washington Post illustrates COVID-19 peak comparisons, plotting time on the horizontal axis and percentage growth relative to the January 2021 peak vertically: top-left shows the baseline chart with basic visualization elements (i.e., axes, labels, lines, legends, and gridlines) but with annotations removed; top-right uses color+enclosure+text ensembles of annotations to help identify the peaks of different COVID-19 waves; bottom-left uses text+connector ensembles to present additional context from the associated article; and bottom-right displays the completely annotated chart.","keywords":["Annotations, visualizations, qualitative study, design space, taxonomy"],"open_access_supplemental_link":"https://shorturl.at/bAGM1","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2306.06043","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1295/v-full-1295_Preview.mp4?token=_i10nu4OVfcDWUUNBh5WygxjQbR6zDtTaRFQ2SYbFSA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1295/v-full-1295_Preview.srt?token=vDAxRPdIju68mPl5pEWvJRetZ5qX4nU96y3icPRXq8g&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full11","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Text, Annotation, and Metaphor","session_uid":"v-full","session_youtube_ff_id":"UiheOlbONP0","session_youtube_ff_link":"https://youtu.be/UiheOlbONP0","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/GmSnZQ8onkA&t=0h12m39s","sessions":["Text, Annotation, and Metaphor"],"time_stamp":"2024-10-16T12:42:00Z","title":"A Qualitative Analysis of Common Practices in Annotations: A Taxonomy and Design Space","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1316","abstract":"We apply an approach from cognitive linguistics by mapping Conceptual Metaphor Theory (CMT) to the visualization 
domain to address patterns of visual conceptual metaphors that are often used in science infographics. Metaphors play an essential part in visual communication and are frequently employed to explain complex concepts. However, their use is often based on intuition, rather than following a formal process. At present, we lack tools and language for understanding and describing metaphor use in visualization to the extent where taxonomy and grammar could guide the creation of visual components, e.g., infographics. Our classification of the visual conceptual mappings within scientific representations is based on the breakdown of visual components in existing scientific infographics. We demonstrate the development of this mapping through a detailed analysis of data collected from four domains (biomedicine, climate, space, and anthropology) that represent a diverse range of visual conceptual metaphors used in the visual communication of science. This work allows us to identify patterns of visual conceptual metaphor use within the domains, resolve ambiguities about why specific conceptual metaphors are used, and develop a better overall understanding of visual metaphor use in scientific infographics. Our analysis shows that ontological and orientational conceptual metaphors are the most widely applied to translate complex scientific concepts. To support our findings we developed a visual exploratory tool based on the collected database that places the individual infographics on a spatio-temporal scale and illustrates the breakdown of visual conceptual metaphors.","accessible_pdf":false,"authors":[{"affiliations":["Masaryk University, Brno, Czech Republic"],"email":"hana.pokojna@gmail.com","is_corresponding":true,"name":"Hana Pokojn\u00e1"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"tobias.isenberg@gmail.com","is_corresponding":false,"name":"Tobias Isenberg"},{"affiliations":["University of Rostock, Rostock, Germany"],"email":"stefan.bruckner@gmail.com","is_corresponding":false,"name":"Stefan Bruckner"},{"affiliations":["Masaryk University, Brno, Czech Republic"],"email":"kozlikova@fi.muni.cz","is_corresponding":false,"name":"Barbora Kozlikova"},{"affiliations":["University of Bergen, Bergen, Norway","Haukeland University Hospital, University of Bergen, Bergen, Norway"],"email":"laura.garrison@uib.no","is_corresponding":false,"name":"Laura Garrison"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1316","image_caption":"image_VisualMetaphors This image illustrates our process (from left to right) for identifying and classifying visual conceptual metaphors in scientific infographics: 1) deconstruct a given infographic to its component graphics, 2) identify component graphics as visual conceptual metaphors versus visual abstractions, 3) classify the conceptual metaphor type (structural, ontological, orientational, or imagistic), and 4) provide infographic metadata and classify the spatiotemporal scale of the phenomenon visualized to enable detailed investigation in our Visual Exploratory Tool. 
","keywords":["Visualization, visual metaphors, science communication, conceptual metaphors, visual communication"],"open_access_supplemental_link":"https://osf.io/8xrjm/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"http://arxiv.org/abs/2407.13416","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1316/v-full-1316_Preview.mp4?token=aom1JV67M5JX6eNDgcArCWIG3btpoAxnO5PPvGZrCZI&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full11","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Text, Annotation, and Metaphor","session_uid":"v-full","session_youtube_ff_id":"vydQsSgBECk","session_youtube_ff_link":"https://youtu.be/vydQsSgBECk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/GmSnZQ8onkA&t=0h25m10s","sessions":["Text, Annotation, and Metaphor"],"time_stamp":"2024-10-16T12:54:00Z","title":"The Language of Infographics: Toward Understanding Conceptual Metaphor Use in Scientific Storytelling","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1594","abstract":"The visualization community has a rich history of reflecting upon visualization design flaws. Although research in this area has remained lively, we believe it is essential to continuously revisit this classic and critical topic in visualization research by incorporating more empirical evidence from diverse sources, characterizing new design flaws, building more systematic theoretical frameworks, and understanding the underlying reasons for these flaws. To address the above gaps, this work investigated visualization design flaws through the lens of the public, constructed a framework to summarize and categorize the identified flaws, and explored why these flaws occur. Specifically, we analyzed 2227 flawed data visualizations collected from an online gallery and derived a design task-associated taxonomy containing 76 specific design flaws. These flaws were further classified into three high-level categories (i.e., misinformation, uninformativeness, unsociability) and ten subcategories (e.g., inaccuracy, unfairness, ambiguity). Next, we organized five focus groups to explore why these design flaws occur and identified seven causes of the flaws. 
Finally, we proposed a research agenda for combating visualization design flaws and summarize nine research opportunities.","accessible_pdf":false,"authors":[{"affiliations":["Fudan University, Shanghai, China","Fudan University, Shanghai, China"],"email":"xingyulan96@gmail.com","is_corresponding":true,"name":"Xingyu Lan"},{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom","University of Edinburgh, Edinburgh, United Kingdom"],"email":"coraline.liu.dataviz@gmail.com","is_corresponding":false,"name":"Yu Liu"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1594","image_caption":"The image consists of three panels: (i) a taxonomy of 76 design flaws, categorized into 3 high-level categories and 10 subcategories; (ii) an example of our website displaying detailed information on design flaws and the corpus; and (iii) an agenda on HOW to combat visualization design flaws.","keywords":["Visualization Design, General Public, Chart Junk, Deceptive Visualization, Misinformation, User Experience"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1594/v-full-1594_Preview.mp4?token=B9Jdz8SGk1SHbPzpmdPFshIFsnyhUeDMu4Jk_yQSLvQ&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1594/v-full-1594_Preview.srt?token=LnGDD4FF6qg_f5naq4IZIiYCBHpBuyjrXVzyyUDpyXA&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full11","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Text, Annotation, and Metaphor","session_uid":"v-full","session_youtube_ff_id":"4OD9F2xvtPk","session_youtube_ff_link":"https://youtu.be/4OD9F2xvtPk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/GmSnZQ8onkA&t=0h51m10s","sessions":["Text, Annotation, and Metaphor"],"time_stamp":"2024-10-16T13:18:00Z","title":"\"I Came Across a Junk\": Understanding Design Flaws of Data Visualization from the Public's Perspective","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1810","abstract":"Classical bibliography, by researching preserved catalogs from both official archives and personal collections of accumulated books, examines the books throughout history, thereby revealing cultural development across historical periods. In this work, we collaborate with domain experts to accomplish the task of data annotation concerning Chinese ancient catalogs. We introduce the CataAnno system that facilitates users in completing annotations more efficiently through cross-linked views, recommendation methods and convenient annotation interactions. The recommendation method can learn the background knowledge and annotation patterns that experts subconsciously integrate into the data during prior annotation processes. CataAnno searches for the most relevant examples previously annotated and recommends to the user. Meanwhile, the cross-linked views assist users in comprehending the correlations between entries and offer explanations for these recommendations. 
Evaluation and expert feedback confirm that the CataAnno system, by offering high-quality recommendations and visualizing the relationships between entries, can mitigate the necessity for specialized knowledge during the annotation process. This results in enhanced accuracy and consistency in annotations, thereby enhancing the overall efficiency.","accessible_pdf":false,"authors":[{"affiliations":["Peking University, Beijing, China"],"email":"hanning.shao@pku.edu.cn","is_corresponding":true,"name":"Hanning Shao"},{"affiliations":["Peking University, Beijing, China"],"email":"xiaoru.yuan@pku.edu.cn","is_corresponding":false,"name":"Xiaoru Yuan"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1810","image_caption":"Classical bibliography examines the books throughout history and reveals cultural development by researching preserved catalogs. Through interdisciplinary collaboration, we propose CataAnno, an intelligent annotation system that helps with annotation cleaning of these ancient catalogs. Learning-based recommendations and convenient interactions supported by CataAnno enhance the consistency and efficiency of the annotation process.","keywords":["Digital humanities, text annotation tool, text visualization, machine learning, catalog"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1810/v-full-1810_Preview.mp4?token=svlpWrXTXba_ADnrB67YdHrra2wvXxdOQlnNYeB_NlU&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1810/v-full-1810_Preview.srt?token=-cCkRQ40jUMa22Rdt6KH-foUcUxQp4MuQSNoO1WfsZs&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full11","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Text, Annotation, and Metaphor","session_uid":"v-full","session_youtube_ff_id":"JP2jrdeR04g","session_youtube_ff_link":"https://youtu.be/JP2jrdeR04g","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/GmSnZQ8onkA&t=1h4m54s","sessions":["Text, Annotation, and Metaphor"],"time_stamp":"2024-10-16T13:30:00Z","title":"CataAnno: An Ancient Catalog Annotator for Annotation Cleaning by Recommendation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233338451","abstract":"This paper investigates the role of text in visualizations, specifically the impact of text position, semantic content, and biased wording. Two empirical studies were conducted based on two tasks (predicting data trends and appraising bias) using two visualization types (bar and line charts). While the addition of text had a minimal effect on how people perceive data trends, there was a significant impact on how biased they perceive the authors to be. This finding revealed a relationship between the degree of bias in textual information and the perception of the authors' bias. Exploratory analyses support an interaction between a person's prediction and the degree of bias they perceived. 
This paper also develops a crowdsourced method for creating chart annotations that range from neutral to highly biased. This research highlights the need for designers to mitigate potential polarization of readers' opinions based on how authors' ideas are expressed.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Chase Stokes"},{"affiliations":"","email":"","is_corresponding":false,"name":"Cindy Xiong Bearfield"},{"affiliations":"","email":"","is_corresponding":false,"name":"Marti Hearst"}],"award":"","doi":"10.1109/TVCG.2023.3338451","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233338451","image_caption":"Left: Study stimuli consisted of line and bar charts that were derived from prior work and designed to have ambiguous prediction outcomes. The experiments varied the text position and text content for these charts; examples of these stimuli from both studies are shown behind the baseline charts. Right: Two tasks were studied with crowdsourced participants: prediction of the outcome of the trend, and assessment of the bias of the visualization author using the assessment questions shown.","keywords":["Visualization, text, annotation, perceived bias, judgment, prediction"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233338451/v-tvcg-20233338451_Preview.mp4?token=GDOcprxTiC1GFyxj-7-5YEpQVECOUAMrHfVYbVVwsQQ&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233338451/v-tvcg-20233338451_Preview.srt?token=uD8QUWlQuAWjv93HYRjzVjp9MRqXSA0I_2IHYxp7x_o&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full11","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Text, Annotation, and Metaphor","session_uid":"v-tvcg","session_youtube_ff_id":"zVf1a096Lj8","session_youtube_ff_link":"https://youtu.be/zVf1a096Lj8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/GmSnZQ8onkA&t=0h0m36s","sessions":["Text, Annotation, and Metaphor"],"time_stamp":"2024-10-16T12:30:00Z","title":"The Role of Text in Visualizations: How Annotations Shape Perceptions of Bias and Influence Predictions","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1281","abstract":"Participatory budgeting (PB) is a democratic approach to allocating municipal spending that has been adopted in many places in recent years, including in Chicago. Current PB voting resembles a ballot where residents are asked which municipal projects, such as school improvements and road repairs, to fund with a limited budget. In this work, we ask how interactive visualization can benefit PB by conducting a design probe-based interview study (N=13) with policy workers and academics with expertise in PB, urban planning, and civic HCI. Our probe explores how graphical elicitation of voter preferences and a dashboard of voting statistics can be incorporated into a realistic PB tool. 
Through qualitative analysis, we find that visualization creates opportunities for city government to set expectations about budget constraints while also granting their constituents greater freedom to articulate a wider range of preferences. However, using visualization to provide transparency about PB requires efforts to mitigate potential access barriers and mistrust. We call for more visualization professionals to help build civic capacity by working in and studying political systems.","accessible_pdf":true,"authors":[{"affiliations":["University of Chicago, Chicago, United States"],"email":"kalea@uchicago.edu","is_corresponding":true,"name":"Alex Kale"},{"affiliations":["University of Chicago, Chicago, United States"],"email":"danni6@uchicago.edu","is_corresponding":false,"name":"Danni Liu"},{"affiliations":["University of Chicago, Chicago, United States"],"email":"mariagabrielaa@uchicago.edu","is_corresponding":false,"name":"Maria Gabriela Ayala"},{"affiliations":["University of Chicago, Chicago, United States"],"email":"hwschwab@uchicago.edu","is_corresponding":false,"name":"Harper Schwab"},{"affiliations":["University of Washington, Seattle, United States","University of Utah, Salt Lake City, United States"],"email":"mcnutt.andrew@gmail.com","is_corresponding":false,"name":"Andrew M McNutt"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1281","image_caption":"An illustration of the application scenario for this work, participatory budgeting in Chicago. We investigate the roles that visualization can play in voting on how municipal funding should be spent on neighborhood projects and reporting results of the participatory budgeting vote to stakeholders.","keywords":["Visualization, Preference elicitation, Digital democracy"],"open_access_supplemental_link":"https://osf.io/tn6m2/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"http://arxiv.org/abs/2407.20103","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1281/v-full-1281_Preview.mp4?token=y9YINAlWfIcoFGqsDl8_Cv6p7SfGysnOf5tLybgHOTU&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1281/v-full-1281_Preview.srt?token=dmpHsfpn5XSrgmXauP9Z0qWOGW45CNrthyZEpM3D2o0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full12","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Journalism and Public Policy","session_uid":"v-full","session_youtube_ff_id":"Uwwba1Z9EbE","session_youtube_ff_link":"https://youtu.be/Uwwba1Z9EbE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/3FAi9iPZPRA&t=0h27m55s","sessions":["Journalism and Public Policy"],"time_stamp":"2024-10-17T18:09:00Z","title":"What Can Interactive Visualization do for Participatory Budgeting in Chicago?","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1438","abstract":"Differential privacy ensures the security of individual privacy but poses challenges to data exploration processes because the limited privacy budget incapacitates the flexibility of exploration and the noisy feedback of data requests leads to confusing uncertainty. 
In this study, we take the lead in describing corresponding exploration scenarios, including underlying requirements and available exploration strategies. To facilitate practical applications, we propose a visual analysis approach to the formulation of exploration strategies. Our approach applies a reinforcement learning model to provide diverse suggestions for exploration strategies according to the exploration intent of users. A novel visual design for representing uncertainty in correlation patterns is integrated into our prototype system to support the proposed approach. Finally, we implemented a user study and two case studies. The results of these studies verified that our approach can help develop strategies that satisfy the exploration intent of users.","accessible_pdf":false,"authors":[{"affiliations":["Nankai University, Tianjin, China"],"email":"wangxumeng@nankai.edu.cn","is_corresponding":true,"name":"Xumeng Wang"},{"affiliations":["Nankai University, Tianjin, China"],"email":"jiaoshuangcheng@mail.nankai.edu.cn","is_corresponding":false,"name":"Shuangcheng Jiao"},{"affiliations":["Arizona State University, Tempe, United States"],"email":"cbryan16@asu.edu","is_corresponding":false,"name":"Chris Bryan"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1438","image_caption":"Defogger augments the ability of humans to explore and gain increased value from data while adhering to constraints of Differential privacy.","keywords":["Differential privacy, Visual data analysis, Data exploration, Visualization for uncertainty illustration"],"open_access_supplemental_link":"https://github.com/Vanellope7/Defogger","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"http://arxiv.org/abs/2407.19364","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1438/v-full-1438_Preview.mp4?token=uETUHjyLsHeNXUZg0czlJ92MqaE_uZaICE3o3vENo_A&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1438/v-full-1438_Preview.srt?token=YKeChoR-RmVNNjhdT2CpXwuYLiFnaKF2TXQe5ZTWlYE&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full12","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Journalism and Public Policy","session_uid":"v-full","session_youtube_ff_id":"BDNvBU24Hls","session_youtube_ff_link":"https://youtu.be/BDNvBU24Hls","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/3FAi9iPZPRA&t=1h0m52s","sessions":["Journalism and Public Policy"],"time_stamp":"2024-10-17T18:45:00Z","title":"Defogger: A Visual Analysis Approach for Data Exploration of Sensitive Data Protected by Differential Privacy","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1446","abstract":"This paper defines, analyzes, and discusses the emerging genre of visualization atlases. We currently witness an increase in web-based, data-driven initiatives that call themselves \u201catlases\u201d while explaining complex, contemporary issues through data and visualizations: climate change, sustainability, AI, or cultural discoveries. 
To understand this emerging genre and inform their design, study, and authoring support, we conducted a systematic analysis of 33 visualization atlases and semi-structured interviews with eight visualization atlas creators. Based on our results, we contribute (1) a definition of a visualization atlas as a compendium of (web) pages aimed at explaining and supporting exploration of data about a dedicated topic through data, visualizations and narration. (2) a set of design patterns of 8 design dimensions, (3) insights into the atlas creation from interviews and (4) the definition of 5 visualization atlas genres. We found that visualization atlases are unique in the way they combine i) exploratory visualization, ii) narrative elements from data-driven storytelling and iii) structured navigation mechanisms. They target a wide range of audiences with different levels of domain knowledge, acting as tools for study, communication, and discovery. We conclude with a discussion of current design practices and emerging questions around the ethics and potential real-world impact of visualization atlases, aimed to inform the design and study of visualization atlases.","accessible_pdf":false,"authors":[{"affiliations":["The University of Edinburgh, Edinburgh, United Kingdom"],"email":"jinrui.w@outlook.com","is_corresponding":true,"name":"Jinrui Wang"},{"affiliations":["Newcastle University, Newcastle Upon Tyne, United Kingdom"],"email":"xinhuan.shu@gmail.com","is_corresponding":false,"name":"Xinhuan Shu"},{"affiliations":["Inria, Bordeaux, France","University of Edinburgh, Edinburgh, United Kingdom"],"email":"bbach@inf.ed.ac.uk","is_corresponding":false,"name":"Benjamin Bach"},{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"uhinrich@ed.ac.uk","is_corresponding":false,"name":"Uta Hinrichs"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1446","image_caption":"An overview of the paper 'Visualization Atlases: Explaining and Exploring Complex Topics through Data, Visualization, and Narration' by Jinrui Wang, Xinhuan Shu, Benjamin Bach, and Ute Hinrichs, featuring a backdrop of selected covers from the visualization atlas cases analyzed in the survey.","keywords":["Visualization Atlases, Information Visualization, Data-driven Storytelling"],"open_access_supplemental_link":"https://vis-atlas.github.io","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1446/v-full-1446_Preview.mp4?token=gK7iNaMwM5fAFvMQve39FBaxshqihdeh3jM1dP0VQ34&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1446/v-full-1446_Preview.srt?token=amVtBvItJPrjZfE4PpsooM1gYrfxFwUYBhJAnKT5w2c&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full12","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Journalism and Public Policy","session_uid":"v-full","session_youtube_ff_id":"S5Pi7FB5Eek","session_youtube_ff_link":"https://youtu.be/S5Pi7FB5Eek","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/3FAi9iPZPRA&t=0h50m56s","sessions":["Journalism and Public 
Policy"],"time_stamp":"2024-10-17T18:33:00Z","title":"Visualization Atlases: Explaining and Exploring Complex Topics through Data, Visualization, and Narration","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1488","abstract":"A year ago, we submitted an IEEE VIS paper entitled \u201cSwaying the Public? Impacts of Election Forecast Visualizations on Emotion, Trust, and Intention in the 2022 U.S. Midterms\u201d [50], which was later bestowed with the honor of a best paper award. Yet, studying such a complex phenomenon required us to explore many more design paths than we could count, and certainly more than we could document in a single paper. This paper, then, is the unwritten prequel\u2014the backstory. It chronicles our journey from a simple idea\u2014to study visualizations for election forecasts\u2014through obstacles such as developing meaningfully different, easy-to-understand forecast visualizations, crafting professional-looking forecasts, and grappling with how to study perceptions of the forecasts before, during, and after the 2022 U.S. midterm elections. This journey yielded a rich set of original knowledge. We formalized a design space for two-party election forecasts, navigating through dimensions like data transformations, visual channels, and types of animated narratives. Through qualitative evaluation of ten representative prototypes with 13 participants, we then identi\ufb01ed six core insights into the interpretation of uncertainty visualizations in a U.S. election context. These insights informed our revisions to remove ambiguity in our visual encodings and to prepare a professional-looking forecasting website. As part of this story, we also distilled challenges faced and design lessons learned to inform both designers and practitioners. 
Ultimately, we hope our methodical approach could inspire others in the community to tackle the hard problems inherent to designing and evaluating visualizations for the general public.","accessible_pdf":true,"authors":[{"affiliations":["Northwestern University, Evanston, United States"],"email":"fumeng.p.yang@gmail.com","is_corresponding":true,"name":"Fumeng Yang"},{"affiliations":["Northwestern University, Evanston, United States","Northwestern University, Evanston, United States"],"email":"mandicai2028@u.northwestern.edu","is_corresponding":false,"name":"Mandi Cai"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"chloemortenson2026@u.northwestern.edu","is_corresponding":false,"name":"Chloe Rose Mortenson"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"hoda@u.northwestern.edu","is_corresponding":false,"name":"Hoda Fakhari"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"aysedlokmanoglu@gmail.com","is_corresponding":false,"name":"Ayse Deniz Lokmanoglu"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"nicholas.diakopoulos@gmail.com","is_corresponding":false,"name":"Nicholas Diakopoulos"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"erik.nisbet@northwestern.edu","is_corresponding":false,"name":"Erik Nisbet"},{"affiliations":["Northwestern University, Chicago, United States"],"email":"matthew.kay@gmail.com","is_corresponding":false,"name":"Matthew Kay"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1488","image_caption":"We iterated over numerous designs for the election forecast visualizations for the 2022 governor elections. This paper documents our journey, experiences, and lessons learned.","keywords":["Uncertainty visualization, probabilistic forecasts, design space, animation"],"open_access_supplemental_link":"https://www.doi.org/10.17605/osf.io/ygq2v","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://doi.org/10.31219/osf.io/927vy","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1488/v-full-1488_Preview.mp4?token=YEVn09ch56y13mFON0apbM7OHxXyXR0MIoh07wLauQs&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1488/v-full-1488_Preview.srt?token=EIKBFxA2WYMJZ5-WNVd7RX0F558nVKQtCGmnsQFM1Ic&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full12","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Journalism and Public Policy","session_uid":"v-full","session_youtube_ff_id":"haLpw_OzpFw","session_youtube_ff_link":"https://youtu.be/haLpw_OzpFw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/3FAi9iPZPRA&t=0h39m1s","sessions":["Journalism and Public Policy"],"time_stamp":"2024-10-17T18:21:00Z","title":"The Backstory to \u201cSwaying the Public\u201d: A Design Chronicle of Election Forecast Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233287585","abstract":"Data visualization and journalism are deeply connected. 
From early infographics to recent data-driven storytelling, visualization has become an integrated part of contemporary journalism, primarily as a communication artifact to inform the general public. Data journalism, harnessing the power of data visualization, has emerged as a bridge between the growing volume of data and our society. Visualization research that centers around data storytelling has sought to understand and facilitate such journalistic endeavors. However, a recent metamorphosis in journalism has brought broader challenges and opportunities that extend beyond mere communication of data. We present this article to enhance our understanding of such transformations and thus broaden visualization research's scope and practical contribution to this evolving field. We first survey recent significant shifts, emerging challenges, and computational practices in journalism. We then summarize six roles of computing in journalism and their implications. Based on these implications, we provide propositions for visualization research concerning each role. Ultimately, by mapping the roles and propositions onto a proposed ecological model and contextualizing existing visualization research, we surface seven general topics and a series of research agendas that can guide future visualization research at this intersection.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Yu Fu"},{"affiliations":"","email":"","is_corresponding":false,"name":"John Stasko"}],"award":"","doi":"10.1109/TVCG.2023.3287585","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233287585","image_caption":"This diagram highlights the intersection of journalism and visualization, focusing on Six Roles of Computing in Journalism: Facilitator, Analyzer, Communicator, Public Forum, Automator, and Auditor. It outlines key transformations in journalism, like interactive and personalized news, and explores computational practices such as data journalism and computer-assisted reporting. The diagram also proposes seven research topics to advance visualization's role in journalism, including combating misinformation and supporting analytical tasks. The aim is to contextualize visualization's value in addressing emerging challenges and enhancing journalistic practices. 
","keywords":["Computational journalism,data visualization,data-driven storytelling, journalism"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233287585/v-tvcg-20233287585_Preview.mp4?token=_kT4xh5FeUt7w5Nhf5xrZIZbU3GAPDjkCZH90GmLwcY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233287585/v-tvcg-20233287585_Preview.srt?token=L-kBpKwLlfShttlv4uMAZZRYshD-haWVX6NJnJKx1a8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full12","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Journalism and Public Policy","session_uid":"v-tvcg","session_youtube_ff_id":"vhoQGLEX1W8","session_youtube_ff_link":"https://youtu.be/vhoQGLEX1W8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/3FAi9iPZPRA&t=0h0m5s","sessions":["Journalism and Public Policy"],"time_stamp":"2024-10-17T17:45:00Z","title":"More Than Data Stories: Broadening the Role of Visualization in Contemporary Journalism","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243355884","abstract":"News articles containing data visualizations play an important role in informing the public on issues ranging from public health to politics. Recent research on the persuasive appeal of data visualizations suggests that prior attitudes can be notoriously difficult to change. Inspired by an NYT article, we designed two experiments to evaluate the impact of elicitation and contrasting narratives on attitude change, recall, and engagement. We hypothesized that eliciting prior beliefs leads to more elaborative thinking that ultimately results in higher attitude change, better recall, and engagement. Our findings revealed that visual elicitation leads to higher engagement in terms of feelings of surprise. While there is an overall attitude change across all experiment conditions, we did not observe a significant effect of belief elicitation on attitude change. With regard to recall error, while participants in the draw trend elicitation exhibited significantly lower recall error than participants in the categorize trend condition, we found no significant difference in recall error when comparing elicitation conditions to no elicitation. In a follow-up study, we added contrasting narratives with the purpose of making the main visualization (communicating data on the focal issue) appear strikingly different. Compared to the results of Study 1, we found that contrasting narratives improved engagement in terms of surprise and interest but interestingly resulted in higher recall error and no significant change in attitude. 
We discuss the effects of elicitation and contrasting narratives in the context of topic involvement and the strengths of temporal trends encoded in the data visualization.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Milad Rogha"},{"affiliations":"","email":"","is_corresponding":false,"name":"Subham Sah"},{"affiliations":"","email":"","is_corresponding":false,"name":"Alireza Karduni"},{"affiliations":"","email":"","is_corresponding":false,"name":"Douglas Markant"},{"affiliations":"","email":"","is_corresponding":false,"name":"Wenwen Dou"}],"award":"","doi":"10.1109/TVCG.2024.3355884","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243355884","image_caption":"Data visualizations in news articles not only inform but also play a crucial role in shaping public opinion on important issues. Can data visualization researchers and designers \u2018nudge\u2019 people toward more elaborative thinking? Inspired by a New York Times article, we conducted two experiments to explore how eliciting prior beliefs and contrasting narratives influence engagement, attitude change, and recall.","keywords":["Data Visualization, Market Research, Visualization, Uncertainty, Data Models, Correlation, Attitude Control, Belief Elicitation, Visual Elicitation, Data Visualization, Contrasting Narratives"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2401.05511","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243355884/v-tvcg-20243355884_Preview.mp4?token=nw4kl_fRHfxOfS0H7UuBVjdlxGqlCmM-pK__biskZ4I&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243355884/v-tvcg-20243355884_Preview.srt?token=JEfeNpbl8wrsDFdI5umzIeceGgHcse6UJpJmkBCfq0A&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full12","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Journalism and Public Policy","session_uid":"v-tvcg","session_youtube_ff_id":"iryPS3aExhY","session_youtube_ff_link":"https://youtu.be/iryPS3aExhY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/3FAi9iPZPRA&t=0h14m48s","sessions":["Journalism and Public Policy"],"time_stamp":"2024-10-17T17:57:00Z","title":"The Impact of Elicitation and Contrasting Narratives on Engagement, Recall and Attitude Change with News Articles Containing Data Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1121","abstract":"Acute stroke demands prompt diagnosis and treatment to achieve optimal patient outcomes. However, the intricate and irregular nature of clinical data associated with acute stroke, particularly blood pressure (BP) measurements, presents substantial obstacles to effective visual analytics and decision-making. Through a year-long collaboration with experienced neurologists, we developed PhenoFlow, a visual analytics system that leverages the collaboration between human and Large Language Models (LLMs) to analyze the extensive and complex data of acute ischemic stroke patients. 
PhenoFlow pioneers an innovative workflow, where the LLM serves as a data wrangler while neurologists explore and supervise the output using visualizations and natural language interactions. This approach enables neurologists to focus more on decision-making with reduced cognitive load. To protect sensitive patient information, PhenoFlow only utilizes metadata to make inferences and synthesize executable codes, without accessing raw patient data. This ensures that the results are both reproducible and interpretable while maintaining patient privacy. The system incorporates a slice-and-wrap design that employs temporal folding to create an overlaid circular visualization. Combined with a linear bar graph, this design aids in exploring meaningful patterns within irregularly measured BP data. Through case studies, PhenoFlow has demonstrated its capability to support iterative analysis of extensive clinical datasets, reducing cognitive load and enabling neurologists to make well-informed decisions. Grounded in long-term collaboration with domain experts, our research demonstrates the potential of utilizing LLMs to tackle current challenges in data-driven clinical decision-making for acute ischemic stroke patients.","accessible_pdf":false,"authors":[{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"jykim@hcil.snu.ac.kr","is_corresponding":true,"name":"Jaeyoung Kim"},{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"sihyeon@hcil.snu.ac.kr","is_corresponding":false,"name":"Sihyeon Lee"},{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"hj@hcil.snu.ac.kr","is_corresponding":false,"name":"Hyeon Jeon"},{"affiliations":["Korea University Guro Hospital, Seoul, Korea, Republic of"],"email":"gooday19@gmail.com","is_corresponding":false,"name":"Keon-Joo Lee"},{"affiliations":["Hankuk University of Foreign Studies, Yongin-si, Korea, Republic of"],"email":"bkim@hufs.ac.kr","is_corresponding":false,"name":"Bohyoung Kim"},{"affiliations":["Seoul National University Bundang Hospital, Seongnam, Korea, Republic of"],"email":"braindoc@snu.ac.kr","is_corresponding":false,"name":"HEE JOON"},{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"jseo@snu.ac.kr","is_corresponding":false,"name":"Jinwook Seo"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1121","image_caption":"PhenoFlow empowers neurologists to explore large and complex stroke datasets with reduced cognitive load. (A) The cohort construction component allows neurologists to define target cohorts using natural language. (B) The Visual Inspection View provides plain-language explanations and small multiples of relevant fields to debug LLM data wrangler behavior. (C) The Cohort View summarizes (C1) cohort relationships in a node-link diagram and (C2) each patient's blood pressure (BP) trajectories as matrix visualization. (C3) Natural language filtering supports iterative cohort exploration. 
(D1) Linear bar charts and (D2) slice-and-wrap visualization present BP trajectories as time-series, revealing triangular patterns in irregularly measured BP data.","keywords":["Stroke, Irregularly spaced time-series data, Multi-dimensional data, Cohort analysis, Large language models"],"open_access_supplemental_link":"https://osf.io/q6yc4/","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.16329","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1121/v-full-1121_Preview.mp4?token=lSkRirWnWHhBzgGO2eXgj7s4VeGJqJXdwF--jEY_GC8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1121/v-full-1121_Preview.srt?token=hwvYBVFbHn0wLpZURC3KYGxqDt__oD8g1j19mCZQKZk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full13","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Natural Language and Multimodal Interaction","session_uid":"v-full","session_youtube_ff_id":"K9vSYLsemPM","session_youtube_ff_link":"https://youtu.be/K9vSYLsemPM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/TkBPnodArzQ&t=0h13m16s","sessions":["Natural Language and Multimodal Interaction"],"time_stamp":"2024-10-16T16:12:00Z","title":"PhenoFlow: A Human-LLM Driven Visual Analytics System for Exploring Large and Complex Stroke Datasets","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1474","abstract":"Recent advancements in Large Language Models (LLMs) and Prompt Engineering have made chatbot customization more accessible, significantly reducing barriers to tasks that previously required programming skills. However, prompt evaluation, especially at the dataset scale, remains complex due to the need to assess prompts across thousands of test instances within a dataset. Our study, based on a comprehensive literature review and pilot study, summarized five critical challenges in prompt evaluation. In response, we introduce a feature-oriented workflow for systematic prompt evaluation. In the context of text summarization, our workflow advocates evaluation with summary characteristics (feature metrics) such as complexity, formality, or naturalness, instead of using traditional quality metrics like ROUGE. This design choice enables a more user-friendly evaluation of prompts, as it guides users in sorting through the ambiguity inherent in natural language. To support this workflow, we introduce Awesum, a visual analytics system that facilitates identifying optimal prompt refinements for text summarization through interactive visualizations, featuring a novel Prompt Comparator design that employs a BubbleSet-inspired design enhanced by dimensionality reduction techniques. We evaluate the effectiveness and general applicability of the system with practitioners from various domains and found that (1) our design helps overcome the learning curve for non-technical people to conduct a systematic evaluation of summarization prompts, and (2) our feature-oriented workflow has the potential to generalize to other NLG and image-generation tasks. 
For future works, we advocate moving towards feature-oriented evaluation of LLM prompts and discuss unsolved challenges in terms of human-agent interaction.","accessible_pdf":false,"authors":[{"affiliations":["University of California Davis, Davis, United States"],"email":"ytlee@ucdavis.edu","is_corresponding":true,"name":"Sam Yu-Te Lee"},{"affiliations":["University of California, Davis, Davis, United States"],"email":"abahukhandi@ucdavis.edu","is_corresponding":false,"name":"Aryaman Bahukhandi"},{"affiliations":["University of California at Davis, Davis, United States"],"email":"dyuliu@ucdavis.edu","is_corresponding":false,"name":"Dongyu Liu"},{"affiliations":["University of California at Davis, Davis, United States"],"email":"ma@cs.ucdavis.edu","is_corresponding":false,"name":"Kwan-Liu Ma"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1474","image_caption":"Bubble Plot, the key visualization in Awesum, designed to show prompt performance. Yellow curves suggest improvements, and purple curves suggest deterioration. The image suggests a mixed performance. ","keywords":["Visual analytics, prompt engineering, text summarization, human-computer interaction, dimensionality reduction"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.12192","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1474/v-full-1474_Preview.mp4?token=8Hfco-UGUujuyZG-6mVef9LTBGpU15pw5Etlf-Jd148&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1474/v-full-1474_Preview.srt?token=BJaRDhTR-831s4wGTxWJtiiJw0QfZMYQkc2ymh0z7nY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full13","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Natural Language and Multimodal Interaction","session_uid":"v-full","session_youtube_ff_id":"H4QzA6XFPFs","session_youtube_ff_link":"https://youtu.be/H4QzA6XFPFs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/TkBPnodArzQ&t=0h43m52s","sessions":["Natural Language and Multimodal Interaction"],"time_stamp":"2024-10-16T16:48:00Z","title":"Towards Dataset-scale and Feature-oriented Evaluation of Text Summarization in Large Language Model Prompts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1504","abstract":"A wide range of visualization authoring interfaces enable the creation of highly customized visualizations. However, prioritizing expressiveness often impedes the learnability of the authoring interface. The diversity of users, such as varying computational skills and prior experiences in user interfaces, makes it even more challenging for a single authoring interface to satisfy the needs of a broad audience. In this paper, we introduce a framework to balance learnability and expressivity in a visualization authoring system. Adopting insights from learnability studies, such as multimodal interaction and visualization literacy, we explore the design space of blending multiple visualization authoring interfaces for supporting authoring tasks in a complementary and flexible manner. 
To evaluate the effectiveness of blending interfaces, we implemented a proof-of-concept system, Blace, that combines four common visualization authoring interfaces\u2014template-based, shelf configuration, natural language, and code editor\u2014that are tightly linked to one another to help users easily relate unfamiliar interfaces to more familiar ones. Using the system, we conducted a user study with 12 domain experts who regularly visualize genomics data as part of their analysis workflow. Participants with varied visualization and programming backgrounds were able to successfully reproduce unfamiliar visualization examples without a guided tutorial in the study. Feedback from a post-study qualitative questionnaire further suggests that blending interfaces enabled participants to learn the system easily and assisted them in confidently editing unfamiliar visualization grammar in the code editor, enabling expressive customization. Reflecting on our study results and the design of our system, we discuss the different interaction patterns that we identified and design implications for blending visualization authoring interfaces.","accessible_pdf":true,"authors":[{"affiliations":["Harvard Medical School, Boston, United States"],"email":"sehi_lyi@hms.harvard.edu","is_corresponding":true,"name":"Sehi L'Yi"},{"affiliations":["Eindhoven University of Technology, Eindhoven, Netherlands"],"email":"a.v.d.brandt@tue.nl","is_corresponding":false,"name":"Astrid van den Brandt"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"etowah_adams@hms.harvard.edu","is_corresponding":false,"name":"Etowah Adams"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"huyen_nguyen@hms.harvard.edu","is_corresponding":false,"name":"Huyen N. Nguyen"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"nils@hms.harvard.edu","is_corresponding":false,"name":"Nils Gehlenborg"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1504","image_caption":"The trade-off between learnability and expressivity has been discussed as an important design consideration for visualization authoring systems. 
We present Blended Interfaces, a framework for combining multiple authoring interfaces in a complementary way to balance learnability and expressivity.","keywords":["Visualization authoring, blended interfaces, genomics data visualization"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/pjcn4","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1504/v-full-1504_Preview.mp4?token=jFGXnF-YKBBGg2rqeIRI8fjQS13h17VKJfyxu12UsP8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1504/v-full-1504_Preview.srt?token=Rt-H1phZ7gxeNNZf9tIRQ-LGT0o9WudtW8kWLqMlzpU&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full13","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Natural Language and Multimodal Interaction","session_uid":"v-full","session_youtube_ff_id":"IL0N2WMISlg","session_youtube_ff_link":"https://youtu.be/IL0N2WMISlg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/TkBPnodArzQ&t=0h0m57s","sessions":["Natural Language and Multimodal Interaction"],"time_stamp":"2024-10-16T16:00:00Z","title":"Learnable and Expressive Visualization Authoring Through Blended Interfaces","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243368060","abstract":"Visual analytics supports data analysis tasks within complex domain problems. However, due to the richness of data types, visual designs, and interaction designs, users need to recall and process a significant amount of information when they visually analyze data. These challenges emphasize the need for more intelligent visual analytics methods. Large language models have demonstrated the ability to interpret various forms of textual data, offering the potential to facilitate intelligent support for visual analytics. We propose LEVA, a framework that uses large language models to enhance users' VA workflows at multiple stages: onboarding, exploration, and summarization. To support onboarding, we use large language models to interpret visualization designs and view relationships based on system specifications. For exploration, we use large language models to recommend insights based on the analysis of system status and data to facilitate mixed-initiative exploration. For summarization, we present a selective reporting strategy to retrace analysis history through a stream visualization and generate insight reports with the help of large language models. We demonstrate how LEVA can be integrated into existing visual analytics systems. 
Two usage scenarios and a user study suggest that LEVA effectively aids users in conducting visual analytics.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Yuheng Zhao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yixing Zhang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yu Zhang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Xinyi Zhao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Junjie Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Zekai Shao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Cagatay Turkay"},{"affiliations":"","email":"","is_corresponding":false,"name":"Siming Chen"}],"award":"","doi":"10.1109/TVCG.2024.3368060","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243368060","image_caption":"LEVA is a framework that uses large language models to enhance users' VA workflows at multiple stages: onboarding, exploration, and summarization. An implementation of LEVA comprises four components: (A) Users can communicate with LLMs and control the insight annotations in the Chat view; (B) The recommended insights for the next step of analysis from LLMs are updated in the Original system view; (C) Users can retrace the interaction history in the Interaction stream view; (D) Once a historical analysis path is selected, the generated insight report will display in the Report view.","keywords":["Insight recommendation, mixed-initiative, interface agent, large language models, visual analytics"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2403.05816","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243368060/v-tvcg-20243368060_Preview.mp4?token=5FAfObJ8DIokplHb5XdWL3faFT4HuWTWbZ2VOvHgU4Q&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full13","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Natural Language and Multimodal Interaction","session_uid":"v-tvcg","session_youtube_ff_id":"Dy0M7rbwbwo","session_youtube_ff_link":"https://youtu.be/Dy0M7rbwbwo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/TkBPnodArzQ&t=0h24m57s","sessions":["Natural Language and Multimodal Interaction"],"time_stamp":"2024-10-16T16:24:00Z","title":"LEVA: Using Large Language Models to Enhance Visual Analytics","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243368621","abstract":"The use of natural language interfaces (NLIs) to create charts is becoming increasingly popular due to the intuitiveness of natural language interactions. One key challenge in this approach is to accurately capture user intents and transform them to proper chart specifications. This obstructs the wide use of NLI in chart generation, as users' natural language inputs are generally abstract (i.e., ambiguous or under-specified), without a clear specification of visual encodings. 
Recently, pre-trained large language models (LLMs) have exhibited superior performance in understanding and generating natural language, demonstrating great potential for downstream tasks. Inspired by this major trend, we propose ChartGPT, generating charts from abstract natural language inputs. However, LLMs are struggling to address complex logic problems. To enable the model to accurately specify the complex parameters and perform operations in chart generation, we decompose the generation process into a step-by-step reasoning pipeline, so that the model only needs to reason a single and specific sub-task during each run. Moreover, LLMs are pre-trained on general datasets, which might be biased for the task of chart generation. To provide adequate visualization knowledge, we create a dataset consisting of abstract utterances and charts and improve model performance through fine-tuning. We further design an interactive interface for ChartGPT that allows users to check and modify the intermediate outputs of each step. The effectiveness of the proposed system is evaluated through quantitative evaluations and a user study.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Yuan Tian"},{"affiliations":"","email":"","is_corresponding":false,"name":"Weiwei Cui"},{"affiliations":"","email":"","is_corresponding":false,"name":"Dazhen Deng"},{"affiliations":"","email":"","is_corresponding":false,"name":"Xinjing Yi"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yurun Yang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Haidong Zhang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yingcai Wu"}],"award":"","doi":"10.1109/TVCG.2024.3368621","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243368621","image_caption":"ChartGPT overview. ChartGPT takes a data table and an utterance provided by the user as input (a). To generate the chart, ChartGPT employs a step-by-step transformation process (b) that decomposes the chart generation task into six sequential steps (b1). Each step is solved by the LLM fine-tuned on our constructed dataset (b2). 
By leveraging the output from each step, ChartGPT generates visualization specifications and presents charts to the user (c).","keywords":["Natural language interfaces, large language models, data visualization"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243368621/v-tvcg-20243368621_Preview.mp4?token=NNF1tI9DBJHzeUfhANxgTkE_e5Wdnhf03qWc96g5uIc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243368621/v-tvcg-20243368621_Preview.srt?token=JSh5JMzUk880YDcFVfesTjDI-6QZABZFEW1cy5mL7Ps&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full13","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Natural Language and Multimodal Interaction","session_uid":"v-tvcg","session_youtube_ff_id":"aOjbYmdr5Y0","session_youtube_ff_link":"https://youtu.be/aOjbYmdr5Y0","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/TkBPnodArzQ&t=0h37m47s","sessions":["Natural Language and Multimodal Interaction"],"time_stamp":"2024-10-16T16:36:00Z","title":"ChartGPT: Leveraging LLMs to Generate Charts from Abstract Natural Language","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243408255","abstract":"Generative text-to-image models, which allow users to create appealing images through a text prompt, have seen a dramatic increase in popularity in recent years. However, most users have a limited understanding of how such models work and often rely on trial and error strategies to achieve satisfactory results. The prompt history contains a wealth of information that could provide users with insights into what has been explored and how the prompt changes impact the output image, yet little research attention has been paid to the visual analysis of such process to support users. We propose the Image Variant Graph, a novel visual representation designed to support comparing prompt-image pairs and exploring the editing history. The Image Variant Graph models prompt differences as edges between corresponding images and presents the distances between images through projection. Based on the graph, we developed the PrompTHis system through co-design with artists. Based on the review and analysis of the prompting history, users can better understand the impact of prompt changes and have a more effective control of image generation. 
A quantitative user study and qualitative interviews demonstrate that PrompTHis can help users review the prompt history, make sense of the model, and plan their creative process.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Yuhan Guo"},{"affiliations":"","email":"","is_corresponding":false,"name":"Hanning Shao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Can Liu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Kai Xu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Xiaoru Yuan"}],"award":"","doi":"10.1109/TVCG.2024.3408255","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243408255","image_caption":"When using text-to-image generative models, users might spend a lot of time in trials and errors. PrompTHis is a visual interactive system that supports users to understand how the models work through exploring prompt history. It consists of a novel Image Variant Graph presents how specific word modifications affect the model's outputs and a history box that shows the attempts in temporal order. The figure shows the prompting records of an artist. Starting from a black-and-white drawing of city buildings (1-5), the artist experimented with color styles (6-7, 8-10), and returned to the black-and-white style (11-14), with \u201catomic explosion\u201d inserted later (15).","keywords":["Text visualization, image visualization, text-to-image generation, editing history, provenance, generative art"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2403.09615","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243408255/v-tvcg-20243408255_Preview.mp4?token=keSV0ZsZjbxswmhd2djgTyYh3KPJ_LcbgLW7qmu0kRA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243408255/v-tvcg-20243408255_Preview.srt?token=H_KJT2BEr0QXj2brQhZcW1C4mJEGqg1WlEgPGKBSH40&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full13","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Natural Language and Multimodal Interaction","session_uid":"v-tvcg","session_youtube_ff_id":"fMwAACKA6oA","session_youtube_ff_link":"https://youtu.be/fMwAACKA6oA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/TkBPnodArzQ&t=0h57m27s","sessions":["Natural Language and Multimodal Interaction"],"time_stamp":"2024-10-16T17:00:00Z","title":"PrompTHis: Visualizing the Process and Influence of Prompt Editing during Text-to-Image Creation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1067","abstract":"Large Language Models (LLMs) are powerful but also raise significant security concerns, particularly regarding the harm they can cause, such as generating fake news that manipulates public opinion on social media and providing responses to unethical activities. Traditional red teaming approaches for identifying AI vulnerabilities rely on manual prompt construction and expertise. This paper introduces AdversaFlow, a novel visual analytics system designed to enhance LLM security against adversarial attacks through human-AI collaboration. 
AdversaFlow involves adversarial training between a target model and a red model, featuring unique multi-level adversarial flow and fluctuation path visualizations. These features provide insights into adversarial dynamics and LLM robustness, enabling experts to identify and mitigate vulnerabilities effectively. We present quantitative evaluations and case studies validating our system's utility and offering insights for future AI security solutions. Our method can enhance LLM security, supporting downstream scenarios like social media regulation by enabling more effective detection, monitoring, and mitigation of harmful content and behaviors.","accessible_pdf":true,"authors":[{"affiliations":["Zhejiang University, Ningbo, China"],"email":"dengdazhen@zju.edu.cn","is_corresponding":true,"name":"Dazhen Deng"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"zhangchuhan024@163.com","is_corresponding":false,"name":"Chuhan Zhang"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"huawzheng@gmail.com","is_corresponding":false,"name":"Huawei Zheng"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"yw.pu@zju.edu.cn","is_corresponding":false,"name":"Yuwen Pu"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"sji@zju.edu.cn","is_corresponding":false,"name":"Shouling Ji"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"ycwu@zju.edu.cn","is_corresponding":false,"name":"Yingcai Wu"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1067","image_caption":"The interface of AdversaFlow includes a Control Panel (A) to configure model parameters and adjust data sampling, an Embedding View (B) to show the projection of prompts, a Metric Monitor (C) displaying the key performance indicators of the model, an Adversarial Flow to facilitate multi-level exploration of models, an Instance List (E) showing prompt details, and a Flucutaion View (F) for the investigation of token-level uncertainty.","keywords":["Visual Analytics for Machine Learning, Artificial Intelligence Security, Large Language Models, Text Visualization"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1067/v-full-1067_Preview.mp4?token=WdJwebGqoxqRl2J3o3aTwy7jdlbRADEynOHmmZ55jE4&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full14","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Look, Learn, Language Models","session_uid":"v-full","session_youtube_ff_id":"NWnvzefxILM","session_youtube_ff_link":"https://youtu.be/NWnvzefxILM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/hEywiCiEJO0&t=0h0m54s","sessions":["Look, Learn, Language Models"],"time_stamp":"2024-10-18T12:30:00Z","title":"AdversaFlow: Visual Red Teaming for Large Language Models with Multi-Level Adversarial Flow","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1096","abstract":"Large Language Models (LLMs) have shown great potential in intelligent visualization systems, especially for domain-specific applications. 
Integrating LLMs into visualization systems presents challenges, and we categorize these challenges into three alignments: domain problems with LLMs, visualization with LLMs, and interaction with LLMs. To achieve these alignments, we propose a framework and outline a workflow to guide the application of fine-tuned LLMs to enhance visual interactions for domain-specific tasks. These alignment challenges are critical in education because of the need for an intelligent visualization system to support beginners' self-regulated learning. Therefore, we apply the framework to education and introduce Tailor-Mind, an interactive visualization system designed to facilitate self-regulated learning for artificial intelligence beginners. Drawing on insights from a preliminary study, we identify self-regulated learning tasks and fine-tuning objectives to guide visualization design and tuning data construction. Our focus on aligning visualization with fine-tuned LLM makes Tailor-Mind more like a personalized tutor. Tailor-Mind also supports interactive recommendations to help beginners better achieve their learning goals. Model performance evaluations and user studies confirm that Tailor-Mind improves the self-regulated learning experience, effectively validating the proposed framework.","accessible_pdf":false,"authors":[{"affiliations":["Fudan University, Shanghai, China"],"email":"lgao.lynne@gmail.com","is_corresponding":true,"name":"Lin Gao"},{"affiliations":["Fudan University, ShangHai, China"],"email":"kingluther6666@gmail.com","is_corresponding":false,"name":"Jing Lu"},{"affiliations":["Fudan University, Shanghai, China"],"email":"gemini25szk@gmail.com","is_corresponding":false,"name":"Zekai Shao"},{"affiliations":["Fudan University, Shanghai, China"],"email":"ziyuelin917@gmail.com","is_corresponding":false,"name":"Ziyue Lin"},{"affiliations":["Fudan unversity, ShangHai, China"],"email":"sbyue23@m.fudan.edu.cn","is_corresponding":false,"name":"Shengbin Yue"},{"affiliations":["Fudan University, Shanghai, China"],"email":"chiokit0819@gmail.com","is_corresponding":false,"name":"Chiokit Ieong"},{"affiliations":["Fudan University, Shanghai, China"],"email":"21307130094@m.fudan.edu.cn","is_corresponding":false,"name":"Yi Sun"},{"affiliations":["University of Vienna, Vienna, Austria"],"email":"rory.james.zauner@univie.ac.at","is_corresponding":false,"name":"Rory Zauner"},{"affiliations":["Fudan University, Shanghai, China"],"email":"zywei@fudan.edu.cn","is_corresponding":false,"name":"Zhongyu Wei"},{"affiliations":["Fudan University, Shanghai, China"],"email":"simingchen3@gmail.com","is_corresponding":false,"name":"Siming Chen"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1096","image_caption":"In applying workflow to Self-Regulated Learning (SRL) in education, we outline the process in three phases. Phase 1 involves establishing a fundamental understanding of the SRL task (A1) and collecting data on artificial intelligence (A2). The design requirements (B) align with the design requirements. Phase 2 details the SRL pipeline sub-tasks and visualizations (C1), leading to the creation of fine-tuning data (C2). In phase 3, we enhance the fine-tuning effects and visualization interactions by integrating user feedback within the visualization system. 
","keywords":["Fine-tuned large language model, visualization system, self-regulated learning, intelligent tutorial system"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.20570","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1096/v-full-1096_Preview.mp4?token=SgWKRslsK3PCv0V7DHhhG02KJ8saIlA87RUWJ-i9jKg&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full14","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Look, Learn, Language Models","session_uid":"v-full","session_youtube_ff_id":"KR_r6ARzzx0","session_youtube_ff_link":"https://youtu.be/KR_r6ARzzx0","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/hEywiCiEJO0&t=0h27m47s","sessions":["Look, Learn, Language Models"],"time_stamp":"2024-10-18T12:54:00Z","title":"Fine-Tuned Large Language Model for Visualization System: A Study on Self-Regulated Learning in Education","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1193","abstract":"Emerging multimodal large language models (MLLMs) exhibit great potential for chart question answering (CQA). Recent efforts primarily focus on scaling up training datasets (i.e., charts, data tables, and question-answer (QA) pairs) through data collection and synthesis. However, our empirical study on existing MLLMs and CQA datasets reveals notable gaps. First, current data collection and synthesis focus on data volume and lack consideration of fine-grained visual encodings and QA tasks, resulting in unbalanced data distribution divergent from practical CQA scenarios. Second, existing work follows the training recipe of the base MLLMs initially designed for natural images, under-exploring the adaptation to unique chart characteristics, such as rich text elements. To fill the gap, we propose a visualization-referenced instruction tuning approach to guide the training dataset enhancement and model development. Specifically, we propose a novel data engine to effectively filter diverse and high-quality data from existing datasets and subsequently refine and augment the data using LLM-based generation techniques to better align with practical QA tasks and visual encodings. Then, to facilitate the adaptation to chart characteristics, we utilize the enriched data to train an MLLM by unfreezing the vision encoder and incorporating a mixture-of-resolution adaptation strategy for enhanced fine-grained recognition. Experimental results validate the effectiveness of our approach. Even with fewer training examples, our model consistently outperforms state-of-the-art CQA models on established benchmarks. We also contribute a dataset split as a benchmark for future research. 
Source codes and datasets of this paper are available at https://github.com/zengxingchen/ChartQA-MLLM.","accessible_pdf":false,"authors":[{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"],"email":"xingchen.zeng@outlook.com","is_corresponding":true,"name":"Xingchen Zeng"},{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"],"email":"hlin386@connect.hkust-gz.edu.cn","is_corresponding":false,"name":"Haichuan Lin"},{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"],"email":"yyebd@connect.ust.hk","is_corresponding":false,"name":"Yilin Ye"},{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China","The Hong Kong University of Science and Technology, Hong Kong SAR, China"],"email":"weizeng@hkust-gz.edu.cn","is_corresponding":false,"name":"Wei Zeng"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1193","image_caption":"Comparison of our model with state-of-the-art MLLMs on chart question answering. Existing MLLMs often fail to understand visual mappings, such as inverted Y-axis, truncated axis, bubble sizing, and area stacking. In contrast, our model, trained with the visualization-referenced dataset we constructed, showcases a better understanding of visualization domain knowledge.","keywords":["Chart-question answering, multimodal large language models, benchmark"],"open_access_supplemental_link":"https://github.com/zengxingchen/ChartQA-MLLM","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.20174","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1193/v-full-1193_Preview.mp4?token=g7956VuPuX9qXmOO9QhCifFhYpHqQxRR8sdHM3xSlXQ&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1193/v-full-1193_Preview.srt?token=3nmqQ4mX2ixqqnQRhaSQNz-6xKRYFtzLC9_Vv-EOzcg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full14","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Look, Learn, Language Models","session_uid":"v-full","session_youtube_ff_id":"fiE38Zyk9VY","session_youtube_ff_link":"https://youtu.be/fiE38Zyk9VY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/hEywiCiEJO0&t=0h40m31s","sessions":["Look, Learn, Language Models"],"time_stamp":"2024-10-18T13:06:00Z","title":"Advancing Multimodal Large Language Models in Chart Question Answering with Visualization-Referenced Instruction Tuning","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1326","abstract":"Evaluating large language models (LLMs) presents unique challenges. While automatic side-by-side evaluation, also known as LLM-as-a-judge, has become a promising solution, model developers and researchers face difficulties with scalability and interpretability when analyzing these evaluation outcomes. To address these challenges, we introduce LLM Comparator, a new visual analytics tool designed for side-by-side evaluations of LLMs. 
This tool provides analytical workflows that help users understand when and why one LLM outperforms or underperforms another, and how their responses differ. Through close collaboration with practitioners developing LLMs at Google, we have iteratively designed, developed, and refined the tool. Qualitative feedback from these users highlights that the tool facilitates in-depth analysis of individual examples while enabling users to visually overview and flexibly slice data. This empowers users to identify undesirable patterns, formulate hypotheses about model behavior, and gain insights for model improvement. LLM Comparator has been integrated into Google's LLM evaluation platforms and open-sourced.","accessible_pdf":false,"authors":[{"affiliations":["Google, Atlanta, United States"],"email":"minsuk.kahng@gmail.com","is_corresponding":true,"name":"Minsuk Kahng"},{"affiliations":["Google Research, Seattle, United States"],"email":"iftenney@google.com","is_corresponding":false,"name":"Ian Tenney"},{"affiliations":["Google Research, Cambridge, United States"],"email":"mahimap@google.com","is_corresponding":false,"name":"Mahima Pushkarna"},{"affiliations":["Google Research, Pittsburgh, United States"],"email":"lxieyang.cmu@gmail.com","is_corresponding":false,"name":"Michael Xieyang Liu"},{"affiliations":["Google Research, Cambridge, United States"],"email":"jwexler@google.com","is_corresponding":false,"name":"James Wexler"},{"affiliations":["Google, Cambridge, United States"],"email":"ereif@google.com","is_corresponding":false,"name":"Emily Reif"},{"affiliations":["Google Research, Mountain View, United States"],"email":"kallarackal@google.com","is_corresponding":false,"name":"Krystal Kallarackal"},{"affiliations":["Google Research, Seattle, United States"],"email":"minsuk.cs@gmail.com","is_corresponding":false,"name":"Minsuk Chang"},{"affiliations":["Google, Cambridge, United States"],"email":"michaelterry@google.com","is_corresponding":false,"name":"Michael Terry"},{"affiliations":["Google, Paris, France"],"email":"ldixon@google.com","is_corresponding":false,"name":"Lucas Dixon"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1326","image_caption":"LLM Comparator is a visual analytics tool consisting of multiple views: an interactive table which displays individual prompts and model responses, and a visualization summary which comprises multiple panels, including score distribution, metrics by prompt category, rationale clusters, n-grams, and custom functions.","keywords":["Visual analytics, large language models, model evaluation, responsible AI, machine learning interpretability."],"open_access_supplemental_link":"https://github.com/PAIR-code/llm-comparator","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1326/v-full-1326_Preview.mp4?token=GYD8fWb2Fu3OrvKSiArk2XzM-LoMhtGk3WbHqeN0LKM&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1326/v-full-1326_Preview.srt?token=TQ8-neIoCwJpayqgGoorO3YdwC5UL97fg-L_QNP5Qr0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full14","session_room":"Bayshore 
V","session_room_id":"bayshore5","session_title":"Look, Learn, Language Models","session_uid":"v-full","session_youtube_ff_id":"DVHN9srNTkk","session_youtube_ff_link":"https://youtu.be/DVHN9srNTkk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/hEywiCiEJO0&t=0h14m53s","sessions":["Look, Learn, Language Models"],"time_stamp":"2024-10-18T12:42:00Z","title":"LLM Comparator: Interactive Analysis of Side-by-Side Evaluation of Large Language Models","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1503","abstract":"The increasing reliance on Large Language Models (LLMs) for health information seeking can pose severe risks due to the potential for misinformation and the complexity of these topics. This paper introduces KnowNet, a visualization system that integrates LLMs with Knowledge Graphs (KG) to provide enhanced accuracy and structured exploration. One core idea in KnowNet is to conceptualize the understanding of a subject as the gradual construction of graph visualization, aligning the user's cognitive process with both the structured data in KGs and the unstructured outputs from LLMs. Specifically, we extracted triples (e.g., entities and their relations) from LLM outputs and mapped them into the validated information and supported evidence in external KGs. Based on the neighborhood of the currently explored entities in KGs, KnowNet provides recommendations for further inquiry, aiming to guide a comprehensive understanding without overlooking critical aspects. A progressive graph visualization is proposed to show the alignment between LLMs and KGs, track previous inquiries, and connect this history with current queries and next-step recommendations. We demonstrate the effectiveness of our system via use cases and expert interviews.","accessible_pdf":false,"authors":[{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"yan00111@umn.edu","is_corresponding":false,"name":"Youfu Yan"},{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"hou00127@umn.edu","is_corresponding":false,"name":"Yu Hou"},{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"xiao0290@umn.edu","is_corresponding":false,"name":"Yongkang Xiao"},{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"zhan1386@umn.edu","is_corresponding":false,"name":"Rui Zhang"},{"affiliations":["University of Minnesota, Minneapolis , United States"],"email":"qianwen@umn.edu","is_corresponding":true,"name":"Qianwen Wang"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1503","image_caption":"In contrast to traditional LLM question-answering, which often generate lengthy and unverified text, KNOWNET leverages external knowledge graph (KG) to enhance health information seeking with LLM. 
KNOWNET provides validation through literature for accuracy, next-step recommendations for comprehensive exploration, and step-by-step graph visualization for a progressive understanding of the topic.","keywords":["Human-AI interactions, knowledge graph, conversational agent, large language model, progressive visualization"],"open_access_supplemental_link":"https://visual-intelligence-umn.github.io/KNOWNET/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.13598","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1503/v-full-1503_Preview.mp4?token=DCKH1_qnxfb-cM6f17TTxp3U6FmGK7tYd4K1adE1Oy8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1503/v-full-1503_Preview.srt?token=qNHGukqjokAVMKtDPxeimr-sum2x_sotCz1_PJ6Vf2s&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full14","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Look, Learn, Language Models","session_uid":"v-full","session_youtube_ff_id":"_eV967qYScs","session_youtube_ff_link":"https://youtu.be/_eV967qYScs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/hEywiCiEJO0&t=0h52m25s","sessions":["Look, Learn, Language Models"],"time_stamp":"2024-10-18T13:30:00Z","title":"Guided Health-related Information Seeking from LLMs via Knowledge Graph Integration","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1544","abstract":"Large Language Models (LLMs) have been adopted for a variety of visualizations tasks, but how far are we from perceptually aware LLMs that can predict human takeaways? Graphical perception literature has shown that human chart takeaways are sensitive to visualization design choices, such as spatial layouts. In this work, we examine the extent to which LLMs exhibit such sensitivity when generating takeaways, using bar charts with varying spatial layouts as a case study. We conducted three experiments and tested four common bar chart layouts: vertically juxtaposed, horizontally juxtaposed, overlaid, and stacked. In Experiment 1, we identified the optimal configurations to generate meaningful chart takeaways by testing four LLMs, two temperature settings, nine chart specifications, and two prompting strategies. We found that even state-of-the-art LLMs struggled to generate semantically diverse and factually accurate takeaways. In Experiment 2, we used the optimal configurations to generate 30 chart takeaways each for eight visualizations across four layouts and two datasets in both zero-shot and one-shot settings. Compared to human takeaways, we found that the takeaways LLMs generated often did not match the types of comparisons made by humans. In Experiment 3, we examined the effect of chart context and data on LLM takeaways. We found that LLMs, unlike humans, exhibited variation in takeaway comparison types for different bar charts using the same bar layout. 
Overall, our case study evaluates the ability of LLMs to emulate human interpretations of data and points to challenges and opportunities in using LLMs to predict human chart takeaways.","accessible_pdf":false,"authors":[{"affiliations":["University of Washington, Seattle, United States"],"email":"wwill@cs.washington.edu","is_corresponding":true,"name":"Huichen Will Wang"},{"affiliations":["Adobe Research, Seattle, United States"],"email":"jhoffs@adobe.com","is_corresponding":false,"name":"Jane Hoffswell"},{"affiliations":["University of Massachusetts Amherst, Amherst, United States"],"email":"yukithane@gmail.com","is_corresponding":false,"name":"Sao Myat Thazin Thane"},{"affiliations":["Adobe Research, San Jose, United States"],"email":"victorbursztyn2022@u.northwestern.edu","is_corresponding":false,"name":"Victor S. Bursztyn"},{"affiliations":["Georgia Tech, Atlanta, United States"],"email":"cxiong@gatech.edu","is_corresponding":false,"name":"Cindy Xiong Bearfield"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1544","image_caption":"There is a discrepancy between human chart takeaways and predictions of human chart takeaways generated by large language models. For a chart that shows the prices of three drinks in two bars, a human would tend to compare the prices of Drink 2 between the two bars, but the model predicts a human to compare the prices of the three drinks in Bar B.","keywords":["Visualization, Graphical Perception, Large Language Models"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1544/v-full-1544_Preview.mp4?token=xASKJTPbGCrNglAxnnLQVlf3Wmp5QMpznTvs0msc0MA&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full14","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Look, Learn, Language Models","session_uid":"v-full","session_youtube_ff_id":"L_tj96AoLnI","session_youtube_ff_link":"https://youtu.be/L_tj96AoLnI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/hEywiCiEJO0&t=0h52m20s","sessions":["Look, Learn, Language Models"],"time_stamp":"2024-10-18T13:18:00Z","title":"How Aligned are Human Chart Takeaways and LLM Predictions? A Case Study on Bar Charts with Varying Layouts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1533","abstract":"We introduce DiffFit, a differentiable algorithm for fitting protein atomistic structures into an experimental reconstructed Cryo-Electron Microscopy (cryo-EM) volume map. In structural biology, this process is necessary to semi-automatically composite large mesoscale models of complex protein assemblies and complete cellular structures that are based on measured cryo-EM data. The current approaches require manual fitting in three dimensions to start, resulting in approximately aligned structures followed by an automated fine-tuning of the alignment. The DiffFit approach enables domain scientists to fit new structures automatically and visualize the results for inspection and interactive revision. 
The fitting begins with differentiable three-dimensional (3D) rigid transformations of the protein atom coordinates followed by sampling the density values at the atom coordinates from the target cryo-EM volume. To ensure a meaningful correlation between the sampled densities and the protein structure, we proposed a novel loss function based on a multi-resolution volume-array approach and the exploitation of the negative space. This loss function serves as a critical metric for assessing the fitting quality, ensuring the fitting accuracy and an improved visualization of the results. We assessed the placement quality of DiffFit with several large, realistic datasets and found it to be superior to that of previous methods. We further evaluated our method in two use cases: automating the integration of known composite structures into larger protein complexes and facilitating the fitting of predicted protein domains into volume densities to aid researchers in identifying unknown proteins. We implemented our algorithm as an open-source plugin (github.com/nanovis/DiffFitViewer) in ChimeraX, a leading visualization software in the field. All supplemental materials are available at osf.io/5tx4q.","accessible_pdf":false,"authors":[{"affiliations":["King Abdullah University of Science and Technology, Thuwal, Saudi Arabia"],"email":"deng.luo@kaust.edu.sa","is_corresponding":true,"name":"Deng Luo"},{"affiliations":["King Abdullah University of Science and Technology, Thuwal, Saudi Arabia"],"email":"zainab.alsuwaykit@kaust.edu.sa","is_corresponding":false,"name":"Zainab Alsuwaykit"},{"affiliations":["King Abdullah University of Science and Technology, Thuwal, Saudi Arabia"],"email":"dawar.khan@kaust.edu.sa","is_corresponding":false,"name":"Dawar Khan"},{"affiliations":["King Abdullah University of Science and Technology, Thuwal, Saudi Arabia"],"email":"ondrej.strnad@kaust.edu.sa","is_corresponding":false,"name":"Ond\u0159ej Strnad"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"tobias.isenberg@gmail.com","is_corresponding":false,"name":"Tobias Isenberg"},{"affiliations":["King Abdullah University of Science and Technology, Thuwal, Saudi Arabia"],"email":"ivan.viola@kaust.edu.sa","is_corresponding":false,"name":"Ivan Viola"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1533","image_caption":"DiffFit workflow. The target cryo-EM volume and the structures to be fit on the top left serve as inputs, which are passed into the novel volume processing, followed by the differentiable fitting algorithm. The fitting results are then clustered and inspected by the expert. 
The expert may zero out voxels corresponding to the placed structures and feed the map back iteratively as input for a new fitting round until the compositing is done.","keywords":["Scalar field data, algorithms, application-motivated visualization, process/workflow design, life sciences, health, medicine, biology, structural biology, bioinformatics, genomics, cryo-EM"],"open_access_supplemental_link":"https://osf.io/5tx4q/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2404.02465","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1533/v-full-1533_Preview.mp4?token=V8OcgojfOjgG9iatxgIF8UGOAImc5hoAlvO4BVA5RCg&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1533/v-full-1533_Preview.srt?token=YiBU9ZXJyfd4EKHrfRQNns5cPKwO7m2mkW2OnbcRTGg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full15","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Biological Data Visualization","session_uid":"v-full","session_youtube_ff_id":"ptmTip8km8k","session_youtube_ff_link":"https://youtu.be/ptmTip8km8k","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Za3820yadmE&t=0h0m24s","sessions":["Biological Data Visualization"],"time_stamp":"2024-10-16T14:15:00Z","title":"DiffFit: Visually-Guided Differentiable Fitting of Molecule Structures to a Cryo-EM Map","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1597","abstract":"In understanding and redesigning the function of proteins in modern biochemistry, protein engineers are increasingly focusing on exploring regions in proteins called loops. Analyzing various characteristics of these regions helps the experts design the transfer of the desired function from one protein to another. This process is denoted as loop grafting. We designed a set of interactive visualizations that provide experts with visual support through all the loop grafting pipeline steps. The workflow is divided into several phases, reflecting the steps of the pipeline. Each phase is supported by a specific set of abstracted 2D visual representations of proteins and their loops that are interactively linked with the 3D View of proteins. By sequentially passing through the individual phases, the user shapes the list of loops that are potential candidates for loop grafting. Finally, the actual in-silico insertion of the loop candidates from one protein to the other is performed, and the results are visually presented to the user. In this way, the fully computational rational design of proteins and their loops results in newly designed protein structures that can be further assembled and tested through in-vitro experiments. We showcase the contribution of our visual support design on a real case scenario changing the enantiomer selectivity of the engineered enzyme. 
Moreover, we provide the readers with the experts\u2019 feedback.","accessible_pdf":false,"authors":[{"affiliations":["Faculty of Informatics, Masaryk University, Brno, Czech Republic"],"email":"kiraa@mail.muni.cz","is_corresponding":true,"name":"Filip Op\u00e1len\u00fd"},{"affiliations":["Faculty of Informatics, Masaryk University, Brno, Czech Republic"],"email":"paloulbrich@gmail.com","is_corresponding":false,"name":"Pavol Ulbrich"},{"affiliations":["Masaryk University, Brno, Czech Republic","St. Anne\u2019s University Hospital, Brno, Czech Republic"],"email":"joan.planas@mail.muni.cz","is_corresponding":false,"name":"Joan Planas-Iglesias"},{"affiliations":["Faculty of Informatics, Masaryk University, Brno, Czech Republic","University of Bergen, Bergen, Norway"],"email":"xbyska@fi.muni.cz","is_corresponding":false,"name":"Jan By\u0161ka"},{"affiliations":["Masaryk University, Brno, Czech Republic","St. Anne\u2019s University Hospital, Brno, Czech Republic"],"email":"stourac.jan@gmail.com","is_corresponding":false,"name":"Jan \u0160toura\u010d"},{"affiliations":["Faculty of Science, Masaryk University, Brno, Czech Republic","St. Anne\u2019s University Hospital Brno, Brno, Czech Republic"],"email":"222755@mail.muni.cz","is_corresponding":false,"name":"David Bedn\u00e1\u0159"},{"affiliations":["Faculty of Informatics, Masaryk University, Brno, Czech Republic"],"email":"katarina.furmanova@gmail.com","is_corresponding":false,"name":"Katar\u00edna Furmanov\u00e1"},{"affiliations":["Masaryk University, Brno, Czech Republic"],"email":"kozlikova@fi.muni.cz","is_corresponding":false,"name":"Barbora Kozlikova"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1597","image_caption":"Protein engineers are focusing on protein loops to design novel proteins through a process called loop grafting. This involves transferring loops to transfer some desired functions from one protein to another. This paper introduces a set of interactive visualizations that support experts throughout the loop grafting pipeline. The workflow is divided into phases, each with specific 2D and 3D visual representations of proteins and their loops. With the aid of these visualizations, users iteratively identify potential loop candidates before performing an in-silico loop grafting and visualizing the results. 
The approach was validated with an expert case study, demonstrating its effectiveness.","keywords":["Protein visualization, protein engineering, loop grafting, abstract views"],"open_access_supplemental_link":"https://gitlab.fi.muni.cz/visitlab/loopgrafter-frontend-1.2","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.20054","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1597/v-full-1597_Preview.mp4?token=dqG5O6MdgNjF_Kd8T_AnVvxZsuj2zZfem9zMErGW_OM&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1597/v-full-1597_Preview.srt?token=pfEKxAwTz0x9qiWxPObe6aSIrQZvBrkeGs6OdI0iXL0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full15","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Biological Data Visualization","session_uid":"v-full","session_youtube_ff_id":"TjB6UTqQMHc","session_youtube_ff_link":"https://youtu.be/TjB6UTqQMHc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Za3820yadmE&t=1h1m19s","sessions":["Biological Data Visualization"],"time_stamp":"2024-10-16T15:15:00Z","title":"Visual Support for the Loop Grafting Workflow on Proteins","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1615","abstract":"We present Cell2Cell, a novel visual analytics approach for quantifying and visualizing networks of cell-cell interactions in three-dimensional (3D) multi-channel cancerous tissue data. By analyzing cellular interactions, biomedical experts can gain a more accurate understanding of the intricate relationships between cancer and immune cells. Recent methods have focused on inferring interaction based on the proximity of cells in low-resolution 2D multi-channel imaging data. By contrast, we analyze cell interactions by quantifying the presence and levels of specific proteins within a tissue sample (protein expressions) extracted from high-resolution 3D multi-channel volume data. Such analyses have a strong exploratory nature and require a tight integration of domain experts in the analysis loop to leverage their deep knowledge. We propose two complementary semi-automated approaches to cope with the increasing size and complexity of the data interactively: On the one hand, we interpret cell-to-cell interactions as edges in a cell graph and analyze the image signal (protein expressions) along those edges, using spatial as well as abstract visualizations. Complementary, we propose a cell-centered approach, enabling scientists to visually analyze polarized distributions of proteins in three dimensions, which also captures neighboring cells with biochemical and cell biological consequences. We evaluate our application in three case studies, where biologists and medical experts use \\tool to investigate tumor micro-environments to identify and quantify T-cell activation in human tissue data. 
We confirmed that our tool can fully solve the use cases and enables a streamlined and detailed analysis of cell-cell interactions.","accessible_pdf":false,"authors":[{"affiliations":["Harvard Medical School, Boston, United States"],"email":"eric.moerth@gmx.at","is_corresponding":true,"name":"Eric M\u00f6rth"},{"affiliations":["University of Vienna, Vienna, Austria"],"email":"kevin.sidak@univie.ac.at","is_corresponding":false,"name":"Kevin Sidak"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"zoltan_maliga@hms.harvard.edu","is_corresponding":false,"name":"Zoltan Maliga"},{"affiliations":["University of Vienna, Vienna, Austria"],"email":"torsten.moeller@univie.ac.at","is_corresponding":false,"name":"Torsten M\u00f6ller"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"nils@hms.harvard.edu","is_corresponding":false,"name":"Nils Gehlenborg"},{"affiliations":["Harvard University, Cambridge, United States"],"email":"peter_sorger@hms.harvard.edu","is_corresponding":false,"name":"Peter Sorger"},{"affiliations":["Harvard University, Cambridge, United States"],"email":"pfister@seas.harvard.edu","is_corresponding":false,"name":"Hanspeter Pfister"},{"affiliations":["Harvard University, Cambridge, United States"],"email":"jbeyer@g.harvard.edu","is_corresponding":false,"name":"Johanna Beyer"},{"affiliations":["New York University, New York, United States","Harvard University, Boston, United States"],"email":"rk4815@nyu.edu","is_corresponding":false,"name":"Robert Kr\u00fcger"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1615","image_caption":"Cell2Cell is a web-based visual analytics system to analyze interactions of cells in 3D biological tissue imaging data. a) Multi-volume viewer using pseudo-colors. The embedded interaction graph displays cells (nodes) and their interactions (edges). b) Cell interaction profiles show the spatial intensity distribution of protein markers between cells. c) Multiple interactions can be compared channel by channel. d) Heatmaps (overview) and line charts (details) can be toggled on demand. e) Radial polarization charts enable cell-centric analysis. 
f) The side panel allows users to customize color settings and (de)activate channels.","keywords":["Biomedical visualization, 3D multi-channel tissue data, Direct volume rendering, Quantitative analysis"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/axy82","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1615/v-full-1615_Preview.mp4?token=3texbGZWm2_V72b-KPt0rGFoMxv76gsFkNDOwCnQ1FE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1615/v-full-1615_Preview.srt?token=aX17wdDATdRzBDANHUFadpD1V288PgtmL8VMdidF2O8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full15","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Biological Data Visualization","session_uid":"v-full","session_youtube_ff_id":"wVBlWgy1Gd8","session_youtube_ff_link":"https://youtu.be/wVBlWgy1Gd8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Za3820yadmE&t=0h48m12s","sessions":["Biological Data Visualization"],"time_stamp":"2024-10-16T15:03:00Z","title":"Cell2Cell: Explorative Cell Interaction Analysis in Multi-Volumetric Tissue Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233337642","abstract":"Molecular docking is a key technique in various fields like structural biology, medicinal chemistry, and biotechnology. It is widely used for virtual screening during drug discovery, computer-assisted drug design, and protein engineering. A general molecular docking process consists of the target and ligand selection, their preparation, and the docking process itself, followed by the evaluation of the results. However, the most commonly used docking software provides no or very basic evaluation possibilities. Scripting and external molecular viewers are often used, which are not designed for an efficient analysis of docking results. Therefore, we developed InVADo, a comprehensive interactive visual analysis tool for large docking data. It consists of multiple linked 2D and 3D views. It filters and spatially clusters the data, and enriches it with post-docking analysis results of protein-ligand interactions and functional groups, to enable well-founded decision-making. In an exemplary case study, domain experts confirmed that InVADo facilitates and accelerates the analysis workflow. They rated it as a convenient, comprehensive, and feature-rich tool, especially useful for virtual screening.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Marco Sch\u00e4fer"},{"affiliations":"","email":"","is_corresponding":false,"name":"Nicolas Brich"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jan By\u0161ka"},{"affiliations":"","email":"","is_corresponding":false,"name":"S\u00e9rgio M. 
Marques"},{"affiliations":"","email":"","is_corresponding":false,"name":"David Bedn\u00e1\u0159"},{"affiliations":"","email":"","is_corresponding":false,"name":"Philipp Thiel"},{"affiliations":"","email":"","is_corresponding":false,"name":"Barbora Kozl\u00edkov\u00e1"},{"affiliations":"","email":"","is_corresponding":true,"name":"Michael Krone"}],"award":"","doi":"10.1109/TVCG.2023.3337642","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233337642","image_caption":"InVADo (Interactive Visual Analysis of Molecular Docking Data) is a visual analytics tool for molecular docking data. It allows users to interactively rank, filter, and cluster the docked compounds and offers a combination of linked 3D and 2D views providing information about the spatial arrangement of the molecules, the type of interaction, or propensities for certain functional groups. The goal of the exploratory visual analysis approach supported by InVADo is to support drug design and similar biochemical applications. ","keywords":["Molecular Docking, AutoDock, Virtual Screening, Visual Analysis, Visualization, Clustering, Protein-Ligand Interaction."],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233337642/v-tvcg-20233337642_Preview.mp4?token=rfeTvNDsaMGzJH6vUQuVUxJMeGbCJpnLMVqOySvUdDU&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full15","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Biological Data Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"AGsPOoexonM","session_youtube_ff_link":"https://youtu.be/AGsPOoexonM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Za3820yadmE&t=0h23m15s","sessions":["Biological Data Visualization"],"time_stamp":"2024-10-16T14:39:00Z","title":"InVADo: Interactive Visual Analysis of Molecular Docking Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243385118","abstract":"Genomics is at the core of precision medicine, and there are high expectations on genomics-enabled improvement of patient outcomes in the years to come. Around the world, initiatives to increase the use of DNA sequencing in clinical routine are being deployed, such as the use of broad panels in the standard care for oncology patients. Such a development comes at the cost of increased demands on throughput in genomic data analysis. In this paper, we use the task of copy number variant (CNV) analysis as a context for exploring visualization concepts for clinical genomics. CNV calls are generated algorithmically, but time-consuming manual intervention is needed to separate relevant findings from irrelevant ones in the resulting large call candidate lists. We present a visualization environment, named Copycat, to support this review task in a clinical scenario.Key components are a scatter-glyph plot replacing the traditional list visualization, and a glyph representation designed for at-a-glance relevance assessments. 
Moreover, we present results from a formative evaluation of the prototype by domain specialists, from which we elicit insights to guide both prototype improvements and visualization for clinical genomics in general.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Emilia St\u00e5hlbom"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jesper Molin"},{"affiliations":"","email":"","is_corresponding":false,"name":"Claes Lundstr\u00f6m"},{"affiliations":"","email":"","is_corresponding":false,"name":"Anders Ynnerman"}],"award":"","doi":"10.1109/TVCG.2024.3385118","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243385118","image_caption":"We created a visualization environment for reviewing genomics data in clinical settings, specifically aimed at review of structural variation. The design utilizes the visual space to through a scatter-glyph plot, and supports an iterative workflow with overview first and details on demand. The position and the three parts of the glyph encode the most important information, and each part of the glyph is designed to utilize a unique visual information channel, minimizing interference and allowing for at-a-glance evaluation of each glyph.","keywords":["Visualization, genomics, copy number variants, clinical decision support, evaluation"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243385118/v-tvcg-20243385118_Preview.mp4?token=wAnNZDc4qcgxf5ZCybmQqWXFTwYRvvAy0ATyhxuTuL0&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full15","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Biological Data Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"AHFJlbQhYVA","session_youtube_ff_link":"https://youtu.be/AHFJlbQhYVA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Za3820yadmE&t=0h36m14s","sessions":["Biological Data Visualization"],"time_stamp":"2024-10-16T14:51:00Z","title":"Visualization for diagnostic review of copy number variants in complex DNA sequencing data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243411786","abstract":"We present a novel method for the interactive construction and rendering of extremely large molecular scenes, capable of representing multiple biological cells in atomistic detail. Our method is tailored for scenes, which are procedurally constructed, based on a given set of building rules. Rendering of large scenes normally requires the entire scene available in-core, or alternatively, it requires out-of-core management to load data into the memory hierarchy as a part of the rendering loop. Instead of out-of-core memory management, we propose to procedurally generate the scene on-demand on the fly. The key idea is a positional- and view-dependent procedural scene-construction strategy, where only a fraction of the atomistic scene around the camera is available in the GPU memory at any given time. The atomistic detail is populated into a uniform-space partitioning using a grid that covers the entire scene. 
Most of the grid cells are not filled with geometry, only those are populated that are potentially seen by the camera. The atomistic detail is populated in a compute shader and its representation is connected with acceleration data structures for hardware ray-tracing of modern GPUs. Objects which are far away, where atomistic detail is not perceivable from a given viewpoint, are represented by a triangle mesh mapped with a seamless texture, generated from the rendering of geometry from atomistic detail. The algorithm consists of two pipelines, the construction-compute pipeline, and the rendering pipeline, which work together to render molecular scenes at an atomistic resolution far beyond the limit of the GPU memory containing trillions of atoms. We demonstrate our technique on multiple models of SARS-CoV-2 and the red blood cell.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Ruwayda Alharbi"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ond\u02c7rej Strnad"},{"affiliations":"","email":"","is_corresponding":false,"name":"Tobias Klein"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ivan Viola"}],"award":"","doi":"10.1109/TVCG.2024.3411786","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243411786","image_caption":"Several populated SARS-CoV-2 virions over Red blood cell particles. The fully textured proxy geometries with partially populated atomistic details are presented in the top-left part, whereas the bottom-right part showcases the continuous Wang tiling used for placement of atomistic details.","keywords":["Interactive rendering, view-guided scene construction, biological data, hardware ray tracing"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2204.05762","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243411786/v-tvcg-20243411786_Preview.mp4?token=Sfz1lUTn5a3NOZ0Ph_-sj5NUxB6j2ZFi0aFaci4A1-8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243411786/v-tvcg-20243411786_Preview.srt?token=qaKnYH41-gSfCdIe9N907yzyt99YIX9TAQx23ahNJIo&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full15","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Biological Data Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"e377VOBXmUw","session_youtube_ff_link":"https://youtu.be/e377VOBXmUw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Za3820yadmE&t=0h10m16s","sessions":["Biological Data Visualization"],"time_stamp":"2024-10-16T14:27:00Z","title":"\u201cNanomatrix: Scalable Construction of Crowded Biological Environments\u201d","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1150","abstract":"Composite visualization represents a widely embraced design that combines multiple visual representations to create an integrated view.However, the traditional approach of creating composite visualizations in immersive environments typically occurs asynchronously outside of the immersive space and is carried out by experienced experts.In this work, we aim to empower users to 
participate in the creation of composite visualization within immersive environments through embodied interactions. This could provide a flexible and fluid experience with immersive visualization and has the potential to facilitate understanding of the relationship between visualization views. We begin with developing a design space of embodied interactions to create various types of composite visualizations with the consideration of data relationships. Drawing inspiration from people's natural experience of manipulating physical objects, we design interactions based on the combination of 3D manipulations in immersive environments. Building upon the design space, we present a series of case studies showcasing the interaction to create different kinds of composite visualizations in virtual reality. Subsequently, we conduct a user study to evaluate the usability of the derived interaction techniques and user experience of creating composite visualizations through embodied interactions. We find that empowering users to participate in composite visualizations through embodied interactions enables them to flexibly leverage different visualization views for understanding and communicating the relationships between different views, which underscores the potential of several future application scenarios.","accessible_pdf":true,"authors":[{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China","The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"qzhual@connect.ust.hk","is_corresponding":false,"name":"Qian Zhu"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States","Georgia Institute of Technology, Atlanta, United States"],"email":"luttul@umich.edu","is_corresponding":false,"name":"Tao Lu"},{"affiliations":["Adobe Research, San Jose, United States","Adobe Research, San Jose, United States"],"email":"sguo@adobe.com","is_corresponding":false,"name":"Shunan Guo"},{"affiliations":["Hong Kong University of Science and Technology, Hong Kong, Hong Kong","Hong Kong University of Science and Technology, Hong Kong, Hong Kong"],"email":"mxj@cse.ust.hk","is_corresponding":false,"name":"Xiaojuan Ma"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States","Georgia Institute of Technology, Atlanta, United States"],"email":"yalongyang@hotmail.com","is_corresponding":true,"name":"Yalong Yang"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1150","image_caption":"This image shows the five cases that represent the idea of our paper: Using embodied interaction to create composite visualization in immersive environments.","keywords":["Composite Visualization, Immersive Analytics, Embodied Interaction"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full
Paper","preprint_link":"https://arxiv.org/abs/2408.02240","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1150/v-full-1150_Preview.mp4?token=3zcKF5eUe8oDzLwVnn2EnfyD7Lz5z_AI81xkAMxN1Ew&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1150/v-full-1150_Preview.srt?token=BgC1E4VgKY7PVi-dOg8GTMXH_hLldpvlolB0BK8D9-E&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full16","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Immersive Visualization and Visual Analytics","session_uid":"v-full","session_youtube_ff_id":"vngAibFJrlE","session_youtube_ff_link":"https://youtu.be/vngAibFJrlE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ARyWz3510nk&t=0h0m56s","sessions":["Immersive Visualization and Visual Analytics"],"time_stamp":"2024-10-16T12:30:00Z","title":"CompositingVis: Exploring Interaction for Creating Composite Visualizations in Immersive Environments","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1699","abstract":"Room-scale immersive data visualisations provide viewers a wide-scale overview of a large dataset, but to interact precisely with individual data points they typically have to navigate to change their point of view. In traditional screen-based visualisations, focus-and-context techniques allow visualisation users to keep a full dataset in view while making detailed selections. Such techniques have been studied extensively on desktop to allow precise selection within large data sets, but they have not been explored in immersive 3D modalities. In this paper we develop a novel immersive focus-and-context technique based on a ``magic portal'' metaphor adapted specifically for data visualisation scenarios. An extendable-hand interaction technique is used to place a portal close to the region of interest.The other end of the portal then opens comfortably within the user's physical reach such that they can reach through to precisely select individual data points.Through a controlled study with 12 participants, we find strong evidence that portals reduce overshoots in selection and overall hand trajectory length, reducing arm and shoulder fatigue compared to ranged interaction without the portal.The portals also enable us to use a robot arm to provide haptic feedback for data within the limited volume of the portal region. In a second study with another 12 participants we found that haptics provided a positive experience (qualitative feedback) but did not significantly reduce fatigue. 
We demonstrate applications for portal-based selection through two use-case scenarios.","accessible_pdf":true,"authors":[{"affiliations":["Monash University, Melbourne, Australia"],"email":"dai.shaozhang@gmail.com","is_corresponding":false,"name":"Shaozhang Dai"},{"affiliations":["Monash University, Melbourne, Australia"],"email":"yi.li5@monash.edu","is_corresponding":false,"name":"Yi Li"},{"affiliations":["The University of British Columbia (Okanagan Campus), Kelowna, Canada"],"email":"barrett.ens@ubc.ca","is_corresponding":false,"name":"Barrett Ens"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"lonni.besancon@gmail.com","is_corresponding":true,"name":"Lonni Besan\u00e7on"},{"affiliations":["Monash University, Melbourne, Australia"],"email":"tgdwyer@gmail.com","is_corresponding":false,"name":"Tim Dwyer"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1699","image_caption":"Magic Portal for data selection. User extends virtual arm to place portal near distant data. Portal opens within reach, allowing easy selection of distant points. Robot arm provides haptic feedback for interactions through the portal.","keywords":["immersive analytics, focus-and-context, remote interaction, portal, haptic feedback"],"open_access_supplemental_link":"https://osf.io/afmwx/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/6c7za?view_only=","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1699/v-full-1699_Preview.mp4?token=wT8OvZyLiVvdKlORk7nzC3ZNKxJnACWjffJwJAYdzQU&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1699/v-full-1699_Preview.srt?token=qiiiLObmOmzdMBa6T5PyiZm9r3DRn9EQAmrxulLxlZs&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full16","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Immersive Visualization and Visual Analytics","session_uid":"v-full","session_youtube_ff_id":"hJ1I_66AuK0","session_youtube_ff_link":"https://youtu.be/hJ1I_66AuK0","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ARyWz3510nk&t=0h38m35s","sessions":["Immersive Visualization and Visual Analytics"],"time_stamp":"2024-10-16T13:06:00Z","title":"Precise Embodied Data Selection in Room-scale Visualisations While Retaining View Context","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233299602","abstract":"Data transformation is an essential step in data science. While experts primarily use programming to transform their data, there is an increasing need to support non-programmers with user interface-based tools. With the rapid development in interaction techniques and computing environments, we report our empirical findings about the effects of interaction techniques and environments on performing data transformation tasks. Specifically, we studied the potential benefits of direct interaction and virtual reality (VR) for data transformation. We compared gesture interaction versus a standard WIMP user interface, each on the desktop and in VR. With the tested data and tasks, we found time performance was similar between desktop and VR. 
Meanwhile, VR demonstrates preliminary evidence to better support provenance and sense-making throughout the data transformation process. Our exploration of performing data transformation in VR also provides initial affirmation for enabling an iterative and fully immersive data science workflow.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Sungwon In"},{"affiliations":"","email":"","is_corresponding":false,"name":"Tica Lin"},{"affiliations":"","email":"","is_corresponding":false,"name":"Chris North"},{"affiliations":"","email":"","is_corresponding":false,"name":"Hanspeter Pfister"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yalong Yang"}],"award":"","doi":"10.1109/TVCG.2023.3299602","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233299602","image_caption":"Four conditions designed for performing data transformation in the user study, including a combination of desktop or VR environments, and WIMP or gesture interactions.","keywords":["Immersive Analytics, Data Transformation, Data Science, Interaction, Empirical Study, Virtual/Augmented/Mixed Reality"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2309.12168","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233299602/v-tvcg-20233299602_Preview.mp4?token=YwrZJyFLLJUsc8qH6NZ-PXjTAwC5-rRg_7eZLTrqQbQ&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full16","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Immersive Visualization and Visual Analytics","session_uid":"v-tvcg","session_youtube_ff_id":"x7MhNW0QKSo","session_youtube_ff_link":"https://youtu.be/x7MhNW0QKSo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ARyWz3510nk&t=0h14m24s","sessions":["Immersive Visualization and Visual Analytics"],"time_stamp":"2024-10-16T12:42:00Z","title":"This is the Table I Want! Interactive Data Transformation on Desktop and in Virtual Reality","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233322898","abstract":"Visual and interactive machine learning systems (IML) are becoming ubiquitous as they empower individuals with varied machine learning expertise to analyze data. However, it remains complex to align interactions with visual marks to a user\u2019s intent for steering machine learning models. We explore using data and visual design probes to elicit users\u2019 desired interactions to steer ML models via visual encodings within IML interfaces. We conducted an elicitation study with 20 data analysts with varying expertise in ML. We summarize our findings as pairs of target-interaction, which we compare to prior systems to assess the utility of the probes. We additionally surfaced insights about factors influencing how and why participants chose to interact with visual encodings, including refraining from interacting. Finally, we reflect on the value of gathering such formative empirical evidence via data and visual design probes ahead of developing IML prototypes. 
","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Anamaria Crisan"},{"affiliations":"","email":"","is_corresponding":false,"name":"Maddie Shang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Eric Brochu"}],"award":"","doi":"10.1109/TVCG.2023.3322898","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"v-tvcg-20233322898","image_caption":"","keywords":["Design Probes, Interactive Machine Learning, Model Steering, Semantic Interaction"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full16","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Immersive Visualization and Visual Analytics","session_uid":"v-tvcg","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ARyWz3510nk&t=1h4m15s","sessions":["Immersive Visualization and Visual Analytics"],"time_stamp":"2024-10-16T13:30:00Z","title":"Eliciting Model Steering Interactions from Users via Data and Visual Design Probes","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233334513","abstract":"Data integration is often performed to consolidate information from multiple disparate data sources during visual data analysis. However, integration operations are usually separate from visual analytics operations such as encode and filter in both interface design and empirical research. We conducted a preliminary user study to investigate whether and how data integration should be incorporated directly into the visual analytics process. We used two interface alternatives featuring contrasting approaches to the data preparation and analysis workflow: manual file-based ex-situ integration as a separate step from visual analytics operations; and automatic UI-based in-situ integration merged with visual analytics operations. Participants were asked to complete specific and free-form tasks with each interface, browsing for patterns, generating insights, and summarizing relationships between attributes distributed across multiple files. Analyzing participants' interactions and feedback, we found both task completion time and total interactions to be similar across interfaces and tasks, as well as unique integration strategies between interfaces and emergent behaviors related to satisficing and cognitive bias. Participants' time spent and interactions revealed that in-situ integration enabled users to spend more time on analysis tasks compared with ex-situ integration. Participants' integration strategies and analytical behaviors revealed differences in interface usage for generating and tracking hypotheses and insights. 
With these results, we synthesized preliminary guidelines for designing future visual analytics interfaces that can support integrating attributes throughout an active analysis process.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Adam Coscia"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ashley Suh"},{"affiliations":"","email":"","is_corresponding":false,"name":"Remco Chang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Alex Endert"}],"award":"","doi":"10.1109/TVCG.2023.3334513","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233334513","image_caption":"We studied differences in sensemaking during visual data analysis between manually integrating data with Excel versus automatic integration built-in to a visual analytics interface. We discovered unique analysis strategies with automatic integration, as well as negative effects on tracking insights, satisficing and biased behaviors. We contribute open questions and design guidelines for building future tools that integrate data throughout the visual analytics process. Our data, analysis, and results are all open-source and available at: https://github.com/AdamCoscia/Integration-Guidelines-VA. To read about them, check out our paper!","keywords":["Visual analytics, Data integration, User interface design, Integration strategies, Analytical behaviors."],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2403.04757","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233334513/v-tvcg-20233334513_Preview.mp4?token=Hl9uNjL8zDZnP5-6I9K85S2LiboQcD3u9kavKSJC9JY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233334513/v-tvcg-20233334513_Preview.srt?token=X4-7kksvv3BwCLt9AwOuiXQ1n9hNxJx4AI3eTJky3Mg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full16","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Immersive Visualization and Visual Analytics","session_uid":"v-tvcg","session_youtube_ff_id":"8EFRrhaq9Bg","session_youtube_ff_link":"https://youtu.be/8EFRrhaq9Bg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ARyWz3510nk&t=0h51m10s","sessions":["Immersive Visualization and Visual Analytics"],"time_stamp":"2024-10-16T13:18:00Z","title":"Preliminary Guidelines For Combining Data Integration and Visual Data Analysis","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233340770","abstract":"We present VoxAR, a method to facilitate an effective visualization of volume-rendered objects in optical see-through head-mounted displays (OST-HMDs). The potential of augmented reality (AR) to integrate digital information into the physical world provides new opportunities for visualizing and interpreting scientific data. However, a limitation of OST-HMD technology is that rendered pixels of a virtual object can interfere with the colors of the real-world, making it challenging to perceive the augmented virtual information accurately. We address this challenge in a two-step approach. 
First, VoxAR determines an appropriate placement of the volume-rendered object in the real-world scene by evaluating a set of spatial and environmental objectives, managed as user-selected preferences and pre-defined constraints. We achieve a real-time solution by implementing the objectives using a GPU shader language. Next, VoxAR adjusts the colors of the input transfer function (TF) based on the real-world placement region. Specifically, we introduce a novel optimization method that adjusts the TF colors such that the resulting volume-rendered pixels are discernible against the background and the TF maintains the perceptual mapping between the colors and data intensity values. Finally, we present an assessment of our approach through objective evaluations and subjective user studies.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Saeed Boorboor"},{"affiliations":"","email":"","is_corresponding":false,"name":"Matthew S. Castellana"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yoonsang Kim"},{"affiliations":"","email":"","is_corresponding":false,"name":"Zhutian Chen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Johanna Beyer"},{"affiliations":"","email":"","is_corresponding":false,"name":"Hanspeter Pfister"},{"affiliations":"","email":"","is_corresponding":false,"name":"Arie E. Kaufman"}],"award":"","doi":"10.1109/TVCG.2023.3340770","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233340770","image_caption":"For visualizing a volume-rendered virtual object in a real-world scene using an OST-HMD, our framework, VoxAR, determines its meaningful placement and, accordingly, adjusts its transfer function (TF) to enhance visibility.
A side-by-side comparison is shown of how the data volume rendered with the adjusted TF effectively improves visibility in OST-AR, when augmented in a spatial location determined by VoxAR.","keywords":["Adaptive Visualization, Situated Visualization, Augmented Reality, Volume Rendering"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233340770/v-tvcg-20233340770_Preview.mp4?token=KXyqwpdkPuPiW1t4Calqu3RvpHF-1AiM5Pl7UjrXVS4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233340770/v-tvcg-20233340770_Preview.srt?token=BdToBMKduiDB8VILXtDqGW7oNa1yOZiUolvNrOb96Aw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full16","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Immersive Visualization and Visual Analytics","session_uid":"v-tvcg","session_youtube_ff_id":"K3ozRzBvwBw","session_youtube_ff_link":"https://youtu.be/K3ozRzBvwBw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ARyWz3510nk&t=0h26m57s","sessions":["Immersive Visualization and Visual Analytics"],"time_stamp":"2024-10-16T12:54:00Z","title":"VoxAR: Adaptive Visualization of Volume Rendered Objects in Optical See-Through Augmented Reality","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1391","abstract":"In volume visualization, visualization synthesis has attracted much attention due to its ability to generate novel visualizations without following the conventional rendering pipeline. However, existing solutions based on generative adversarial networks often require many training images and take significant training time. Still, issues such as low quality, consistency, and flexibility persist. This paper introduces StyleRF-VolVis, an innovative style transfer framework for expressive volume visualization (VolVis) via neural radiance field (NeRF). The expressiveness of StyleRF-VolVis is upheld by its ability to accurately separate the underlying scene geometry (i.e., content) and color appearance (i.e., style), conveniently modify color, opacity, and lighting of the original rendering while maintaining visual content consistency across the views, and effectively transfer arbitrary styles from reference images to the reconstructed 3D scene. To achieve these, we design a base NeRF model for scene geometry extraction, a palette color network to classify regions of the radiance field for photorealistic editing, and an unrestricted color network to lift the color palette constraint via knowledge distillation for non-photorealistic editing. 
We demonstrate the superior quality, consistency, and flexibility of StyleRF-VolVis by experimenting with various volume rendering scenes and reference images and comparing StyleRF-VolVis against other image-based (AdaIN), video-based (ReReVST), and NeRF-based (ARF and SNeRF) style rendering solutions.","accessible_pdf":false,"authors":[{"affiliations":["University of Notre Dame, Notre Dame, United States"],"email":"ktang2@nd.edu","is_corresponding":true,"name":"Kaiyuan Tang"},{"affiliations":["University of Notre Dame, Notre Dame, United States"],"email":"chaoli.wang@nd.edu","is_corresponding":false,"name":"Chaoli Wang"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1391","image_caption":"StyleRF-VolVis is an innovative style transfer framework based on the neural radiance field for expressive volume visualization. This framework contains three components: a base NeRF model for ensuring accurate geometry reconstruction, a palette color network to support photorealistic style editing, and an unrestricted color network to achieve non-photorealistic style editing. ","keywords":["Style transfer, neural radiance field, knowledge distillation, volume visualization"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.00150","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1391/v-full-1391_Preview.mp4?token=ITGmgaQ31HqudY__3ys3icjyOy8ieyvto0Z2lDmm_8A&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full17","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Machine Learning for Visualization","session_uid":"v-full","session_youtube_ff_id":"TTUmK5WKV_w","session_youtube_ff_link":"https://youtu.be/TTUmK5WKV_w","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-VxwRlkinOQ&t=0h25m50s","sessions":["Machine Learning for Visualization"],"time_stamp":"2024-10-16T12:54:00Z","title":"StyleRF-VolVis: Style Transfer of Neural Radiance Fields for Expressive Volume Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1427","abstract":"Numerical simulation serves as a cornerstone in scientific modeling, yet the process of fine-tuning simulation parameters poses significant challenges. Conventionally, parameter adjustment relies on extensive numerical simulations, data analysis, and expert insights, resulting in substantial computational costs and low efficiency. The emergence of deep learning in recent years has provided promising avenues for more efficient exploration of parameter spaces. However, existing approaches often lack intuitive methods for precise parameter adjustment and optimization. To tackle these challenges, we introduce ParamsDrag, a model that facilitates parameter space exploration through direct interaction with visualizations. Inspired by DragGAN, our ParamsDrag model operates in three steps. First, the generative component of ParamsDrag generates visualizations based on the input simulation parameters. 
Second, by directly dragging structure-related features in the visualizations, users can intuitively understand the controlling effect of different parameters. Third, with the understanding from the earlier step, users can steer ParamsDrag to produce dynamic visual outcomes. Through experiments conducted on real-world simulations and comparisons with state-of-the-art deep learning-based approaches, we demonstrate the efficacy of our solution.","accessible_pdf":true,"authors":[{"affiliations":["Computer Network Information Center, Chinese Academy of Sciences"],"email":"liguan@sccas.cn","is_corresponding":true,"name":"Guan Li"},{"affiliations":["Beijing Forestry University"],"email":"leo_edumail@163.com","is_corresponding":false,"name":"Yang Liu"},{"affiliations":["Computer Network Information Center, Chinese Academy of Sciences"],"email":"sgh@sccas.cn","is_corresponding":false,"name":"Guihua Shan"},{"affiliations":["Chinese Academy of Sciences"],"email":"chengshiyu@cnic.cn","is_corresponding":false,"name":"Shiyu Cheng"},{"affiliations":["Beijing Forestry University"],"email":"weiqun.cao@126.com","is_corresponding":false,"name":"Weiqun Cao"},{"affiliations":["Visa Research"],"email":"junpeng.wang.nk@gmail.com","is_corresponding":false,"name":"Junpeng Wang"},{"affiliations":["National Taiwan Normal University"],"email":"caseywang777@gmail.com","is_corresponding":false,"name":"Ko-Chih Wang"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1427","image_caption":"ParamsDrag is a surrogate model developed to enhance the exploration of parameter spaces through direct interaction with visualizations. It allows scientists to intuitively manipulate a feature of interest by dragging it to a desired location within a visualization, subsequently generating the corresponding image. 
Additionally, ParamsDrag can retrieve the simulation parameters that led to the generation of the selected image, thereby streamlining the process of parameter identification and adjustment.","keywords":["parameter exploration, feature interaction, parameter inversion"],"open_access_supplemental_link":"https://github.com/YangL-04-20/ParamsDrag","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.14100","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1427/v-full-1427_Preview.mp4?token=QuVb7a5FWGuhagCSddIouToS9sigElvTzOsQLahtvBE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1427/v-full-1427_Preview.srt?token=T679EKpxR4eqH0-ZThnkpZDa5UXR34a61w341c6xiPk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full17","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Machine Learning for Visualization","session_uid":"v-full","session_youtube_ff_id":"qD2sZpl6UHU","session_youtube_ff_link":"https://youtu.be/qD2sZpl6UHU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-VxwRlkinOQ&t=0h38m22s","sessions":["Machine Learning for Visualization"],"time_stamp":"2024-10-16T13:06:00Z","title":"ParamsDrag: Interactive Parameter Space Exploration via Image-Space Dragging","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1599","abstract":"Existing deep learning-based surrogate models facilitate efficient data generation, but fall short in uncertainty quantification, efficient parameter space exploration, and reverse prediction. In our work, we introduce SurroFlow, a novel normalizing flow-based surrogate model, to learn the invertible transformation between simulation parameters and simulation outputs. The model not only allows accurate predictions of simulation outcomes for a given simulation parameter but also supports uncertainty quantification in the data generation process. Additionally, it enables efficient simulation parameter recommendation and exploration. We integrate SurroFlow and a genetic algorithm as the backend of a visual interface to support effective user-guided ensemble simulation exploration and visualization. 
Our framework significantly reduces the computational costs while enhancing the reliability and exploration capabilities of scientific surrogate models.","accessible_pdf":false,"authors":[{"affiliations":["The Ohio State University, Columbus, United States","The Ohio State University, Columbus, United States"],"email":"shen.1250@osu.edu","is_corresponding":false,"name":"JINGYI SHEN"},{"affiliations":["The Ohio State University, Columbus, United States","The Ohio State University, Columbus, United States"],"email":"duan.418@osu.edu","is_corresponding":true,"name":"Yuhan Duan"},{"affiliations":["The Ohio State University , Columbus , United States","The Ohio State University , Columbus , United States"],"email":"hwshen@cse.ohio-state.edu","is_corresponding":false,"name":"Han-Wei Shen"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1599","image_caption":"In our work, we introduce SurroFlow, a novel normalizing flow-based surrogate model, to learn the invertible transformation between simulation parameters and simulation outputs. The model not only allows accurate predictions of simulation outcomes for a given simulation parameter but also supports uncertainty quantification in the data generation process. Additionally, it enables reverse prediction of simulation parameters of a given simulation data. We integrate SurroFlow and a genetic algorithm as the backend of a visual interface to support effective user-guided ensemble simulation exploration and visualization. ","keywords":["Surrogate model, normalizing flow, uncertainty quantification, parameter space exploration"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.12884","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1599/v-full-1599_Preview.mp4?token=9J85l_VJt9xibns9hdNhObpUAnkxe-sjC9TK-EcqYqE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1599/v-full-1599_Preview.srt?token=2DOw2qRFLbhWNv0ay10L3whVIBwUNO5pk8QCy7n7q1g&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full17","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Machine Learning for Visualization","session_uid":"v-full","session_youtube_ff_id":"htK9ytzwcDM","session_youtube_ff_link":"https://youtu.be/htK9ytzwcDM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-VxwRlkinOQ&t=0h50m2s","sessions":["Machine Learning for Visualization"],"time_stamp":"2024-10-16T13:18:00Z","title":"SurroFlow: A Flow-Based Surrogate Model for Parameter Space Exploration and Uncertainty Quantification","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1866","abstract":"Feature grid Scene Representation Networks (SRNs) have been applied to scientific data as compact functional surrogates for analysis and visualization. As SRNs are black-box lossy data representations, assessing the prediction quality is critical for scientific visualization applications to ensure that scientists can trust the information being visualized. 
Currently, existing architectures do not support inference time reconstruction quality assessment, as coordinate-level errors cannot be evaluated in the absence of ground truth data. By employing the uncertain neural network architecture in feature grid SRNs, we obtain prediction variances during inference time to facilitate confidence-aware data reconstruction. Specifically, we propose a parameter-efficient multi-decoder SRN (MDSRN) architecture consisting of a shared feature grid with multiple lightweight multi-layer perceptron decoders. MDSRN can generate a set of plausible predictions for a given input coordinate to compute the mean as the prediction of the multi-decoder ensemble and the variance as a confidence score. The coordinate-level variance can be rendered along with the data to inform the reconstruction quality, or be integrated into uncertainty-aware volume visualization algorithms. To prevent the misalignment between the quantified variance and the prediction quality, we propose a novel variance regularization loss for ensemble learning that promotes the Regularized multi-decoder SRN (RMDSRN) to obtain a more reliable variance that correlates closely to the true model error. We comprehensively evaluate the quality of variance quantification and data reconstruction of Monte Carlo Dropout (MCD), Mean Field Variational Inference (MFVI), Deep Ensemble (DE), and Predicting Variance (PV) in comparison with our proposed MDSRN and RMDSRN applied to state-of-the-art feature grid SRNs across diverse scalar field datasets. We demonstrate that RMDSRN attains the most accurate data reconstruction and competitive variance-error correlation among uncertain SRNs under the same neural network parameter budgets. Furthermore, we present an adaptation of uncertainty-aware volume rendering and shed light on the potential of incorporating uncertain predictions in improving the quality of volume rendering for uncertain SRNs. Through ablation studies on the regularization strength and decoder count, we show that MDSRN and RMDSRN are expected to perform sufficiently well with a default configuration without requiring customized hyperparameter settings for different datasets.","accessible_pdf":false,"authors":[{"affiliations":["The Ohio State University, Columbus, United States"],"email":"xiong.336@osu.edu","is_corresponding":true,"name":"Tianyu Xiong"},{"affiliations":["The Ohio State University, Columbus, United States"],"email":"wurster.18@osu.edu","is_corresponding":false,"name":"Skylar Wolfgang Wurster"},{"affiliations":["The Ohio State University, Columbus, United States","Argonne National Laboratory, Lemont, United States"],"email":"guo.2154@osu.edu","is_corresponding":false,"name":"Hanqi Guo"},{"affiliations":["Argonne National Laboratory, Lemont, United States"],"email":"tpeterka@mcs.anl.gov","is_corresponding":false,"name":"Tom Peterka"},{"affiliations":["The Ohio State University , Columbus , United States"],"email":"hwshen@cse.ohio-state.edu","is_corresponding":false,"name":"Han-Wei Shen"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1866","image_caption":"By training multiple lightweight decoders and combining a variance regularization in the loss function, regularized multi-decoder SRN (RMDSRN) enables any feature grid SRN to produce uncertain predictions, such that a variance can be computed and visualized for post-training prediction quality assessment. 
Thanks to the variance regularization, the variances are more likely to resemble the spatial patterns of the actual prediction errors, which are inaccessible during inference time.","keywords":["Scene representation network, deep learning, scientific visualization, ensemble learning"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1866/v-full-1866_Preview.mp4?token=JvYFoDniHtLPq0JNoRUiG6oNdGm3_ZCRaIqpCOhs9yY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1866/v-full-1866_Preview.srt?token=7-modfywyiOsocXo-RX1xgkeCSva1HmuLauuXK2XFW0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full17","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Machine Learning for Visualization","session_uid":"v-full","session_youtube_ff_id":"Kx3B9acBnOw","session_youtube_ff_link":"https://youtu.be/Kx3B9acBnOw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-VxwRlkinOQ&t=1h3m1s","sessions":["Machine Learning for Visualization"],"time_stamp":"2024-10-16T13:30:00Z","title":"Regularized Multi-Decoder Ensemble for an Error-Aware Scene Representation Network","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233345373","abstract":"Traditional deep learning algorithms assume that all data is available during training, which presents challenges when handling large-scale time-varying data. To address this issue, we propose a data reduction pipeline called knowledge distillation-based implicit neural representation (KD-INR) for compressing large-scale time-varying data. The approach consists of two stages: spatial compression and model aggregation. In the first stage, each time step is compressed using an implicit neural representation with bottleneck layers and features of interest preservation-based sampling. In the second stage, we utilize an offline knowledge distillation algorithm to extract knowledge from the trained models and aggregate it into a single model. We evaluated our approach on a variety of time-varying volumetric data sets. 
Both quantitative and qualitative results, such as PSNR, LPIPS, and rendered images, demonstrate that KD-INR surpasses the state-of-the-art approaches, including learning-based (i.e., CoordNet, NeurComp, and SIREN) and lossy compression (i.e., SZ3, ZFP, and TTHRESH) methods, at various compression ratios ranging from hundreds to ten thousand.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Jun Han"},{"affiliations":"","email":"","is_corresponding":false,"name":"Hao Zheng"},{"affiliations":"","email":"","is_corresponding":false,"name":"Change Bi"}],"award":"","doi":"10.1109/TVCG.2023.3345373","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233345373","image_caption":"We propose KD-INR, a knowledge distillation-based implicit neural representation, enabling sequential compression of time-varying data with memory efficiency.","keywords":["Time-varying data compression, implicit neural representation, knowledge distillation, volume visualization."],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233345373/v-tvcg-20233345373_Preview.mp4?token=G8qsRJ43TvtKl9ixMm-0EzulO0vYdyrc45e6mkBK4Kk&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233345373/v-tvcg-20233345373_Preview.srt?token=TxuFh0QqKDQp8HO0I4LFp4rEB6p1OeuIylrsWpoU994&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full17","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Machine Learning for Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"wPUZtAngZUk","session_youtube_ff_link":"https://youtu.be/wPUZtAngZUk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-VxwRlkinOQ&t=0h0m38s","sessions":["Machine Learning for Visualization"],"time_stamp":"2024-10-16T12:30:00Z","title":"KD-INR: Time-Varying Volumetric Data Compression via Knowledge Distillation-based Implicit Neural Representation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243365089","abstract":"Implicit Neural Representations (INRs) are widely used for scientific data reduction and visualization by modeling the function that maps a spatial location to a data value. Without any prior knowledge about the spatial distribution of values, we are forced to sample densely from INRs to perform visualization tasks like iso-surface extraction which can be very computationally expensive. Recently, range analysis has shown promising results in improving the efficiency of geometric queries, such as ray casting and hierarchical mesh extraction, on INRs for 3D geometries by using arithmetic rules to bound the output range of the network within a spatial region. However, the analysis bounds are often too conservative for complex scientific data. In this paper, we present an improved technique for range analysis by revisiting the arithmetic rules and analyzing the probability distribution of the network output within a spatial region. We model this distribution efficiently as a Gaussian distribution by applying the central limit theorem.
Excluding low probability values, we are able to tighten the output bounds, resulting in a more accurate estimation of the value range, and hence more accurate identification of iso-surface cells and more efficient iso-surface extraction on INRs. Our approach demonstrates superior performance in terms of the iso-surface extraction time on four datasets compared to the original range analysis method and can also be generalized to other geometric query tasks.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Haoyu Li"},{"affiliations":"","email":"","is_corresponding":false,"name":"Han-Wei Shen"}],"award":"","doi":"10.1109/TVCG.2024.3365089","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243365089","image_caption":"This image shows the iso-surface extraction results comparison between our approach on the right and the traditional approach on the left. We can only observe minor differences between them. The statistics of the missed iso-surface components also suggest our method preserves the accuracy while being much more efficient than the traditional iso-surface extraction method.","keywords":["Iso-surface extraction, implicit neural representation, uncertainty propagation, affine arithmetic."],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2402.13861","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243365089/v-tvcg-20243365089_Preview.mp4?token=YSsQqjdhLKylaN5IeHQC47JZTglIl1_TAOSsSfKkdvc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243365089/v-tvcg-20243365089_Preview.srt?token=phWo3Qr_Bff0OgzJr7-XKTgDhv37s7bU3np7TbOOGmg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full17","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Machine Learning for Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"UoEnrW69xCE","session_youtube_ff_link":"https://youtu.be/UoEnrW69xCE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-VxwRlkinOQ&t=0h14m11s","sessions":["Machine Learning for Visualization"],"time_stamp":"2024-10-16T12:42:00Z","title":"Improving Efficiency of Iso-Surface Extraction on Implicit Neural Representations Using Uncertainty Propagation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1185","abstract":"This paper presents an interactive technique to explain visual patterns in network visualizations to analysts who do not understand these visualizations and who are learning to read them. Learning a visualization requires mastering its visual grammar and decoding information presented through visual marks, graphical encodings, and spatial configurations. To help people learn network visualization designs and extract meaningful information, we introduce the concept of interactive pattern explanation that allows viewers to select an arbitrary area in a visualization, then automatically mines the underlying data patterns, and explains both visual and data patterns present in the viewer\u2019s selection. 
In a qualitative and a quantitative user study with a total of 32 participants, we compare interactive pattern explanations to textual-only and visual-only (cheatsheets) explanations. Our results show that interactive explanations increase learning of i) unfamiliar visualizations, ii) patterns in network science, and iii) the respective network terminology.","accessible_pdf":false,"authors":[{"affiliations":["Newcastle University, Newcastle Upon Tyne, United Kingdom","University of Edinburgh, Edinburgh, United Kingdom"],"email":"xinhuan.shu@gmail.com","is_corresponding":true,"name":"Xinhuan Shu"},{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"alexis.pister@hotmail.com","is_corresponding":false,"name":"Alexis Pister"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"tangjunxiu@zju.edu.cn","is_corresponding":false,"name":"Junxiu Tang"},{"affiliations":["University of Toronto, Toronto, Canada"],"email":"fanny@dgp.toronto.edu","is_corresponding":false,"name":"Fanny Chevalier"},{"affiliations":["Inria, Bordeaux, France","University of Edinburgh, Edinburgh, United Kingdom"],"email":"bbach@inf.ed.ac.uk","is_corresponding":false,"name":"Benjamin Bach"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1185","image_caption":"We propose Pattern Explainer to help analysts who are unfamiliar with network visualizations learn about visual patterns in the representation of their data. Looking at the visualization, a user spots a visual pattern of interest, e.g. a \u201cbug\u201d-looking pattern in the matrix. To inquire about whether this pattern is meaningful, the user selects the area. Pattern Explainer then automatically mines the selection, against a dictionary of network motifs, and provides the user with explanations of what underlying network patterns the visual pattern reveals.","keywords":["Visualization education, network visualization"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/pdf/2408.01272","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1185/v-full-1185_Preview.mp4?token=Cgmabne65gfCNXK-NMIeiuk8fYX4EieVqafAEjZboQ8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1185/v-full-1185_Preview.srt?token=QBN_50P54Pb6D33gG6W1ws6A2HHVjLAE4HfhX4NEALs&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full18","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Where the Networks Are","session_uid":"v-full","session_youtube_ff_id":"XYAcTewN_E8","session_youtube_ff_link":"https://youtu.be/XYAcTewN_E8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/uw_DXYjpu24&t=0h26m22s","sessions":["Where the Networks Are"],"time_stamp":"2024-10-18T12:54:00Z","title":"Does This Have a Particular Meaning?: Interactive Pattern Explanation for Network Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1606","abstract":"With the increase of graph size, it becomes difficult or even impossible to visualize graph structures clearly within the limited screen space. 
Consequently, it is crucial to design effective visual representations for large graphs. In this paper, we propose AdaMotif, a novel approach that can capture the essential structure patterns of large graphs and effectively reveal the overall structures via adaptive motif designs. Specifically, our approach involves partitioning a given large graph into multiple subgraphs, then clustering similar subgraphs and extracting similar structural information within each cluster. Subsequently, adaptive motifs representing each cluster are generated and utilized to replace the corresponding subgraphs, leading to a simplified visualization. Our approach aims to preserve as much information as possible from the subgraphs while simplifying the graph efficiently. Notably, our approach successfully visualizes crucial community information within a large graph. We conduct case studies and a user study using real-world graphs to validate the effectiveness of our proposed approach. The results demonstrate the capability of our approach in simplifying graphs while retaining important structural and community information.","accessible_pdf":false,"authors":[{"affiliations":["Shenzhen University, Shenzhen, China"],"email":"hzhou@szu.edu.cn","is_corresponding":true,"name":"Hong Zhou"},{"affiliations":["Shenzhen University, Shenzhen, China"],"email":"laipeifeng1111@gmail.com","is_corresponding":false,"name":"Peifeng Lai"},{"affiliations":["Shenzhen University, Shenzhen, China"],"email":"zhida.sun@connect.ust.hk","is_corresponding":false,"name":"Zhida Sun"},{"affiliations":["Shenzhen University, Shenzhen, China"],"email":"2310274034@email.szu.edu.cn","is_corresponding":false,"name":"Xiangyuan Chen"},{"affiliations":["Shenzhen University, Shen Zhen, China"],"email":"275621136@qq.com","is_corresponding":false,"name":"Yang Chen"},{"affiliations":["Shenzhen University, Shenzhen, China"],"email":"hswu@szu.edu.cn","is_corresponding":false,"name":"Huisi Wu"},{"affiliations":["Nanyang Technological University, Singapore, Singapore"],"email":"yong-wang@ntu.edu.sg","is_corresponding":false,"name":"Yong WANG"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1606","image_caption":"Case analysis of the Cpan dataset: (a) the original graph; (b) our AdaMotif. The highlighted areas of each subfigure show the enlarged communities. We highlight identical communities for comparison. The identical communities are marked using \"The same community\". In (a), to make communities easier to identify, their nodes and edges are highlighted in blue and red, respectively. In (b), motifs with the same color and similar shape represent similar communities. The size of the motif indicates the number of nodes in this community. 
Our result provides a clearer expression of community information.","keywords":["Graph visualization, node-link diagrams, graph simplification"],"open_access_supplemental_link":"https://osf.io/pb8t3/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1606/v-full-1606_Preview.mp4?token=XNAI--kxzKJbfnEWfEQn-z-dQs-XeCUps7OttO-Ri_8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1606/v-full-1606_Preview.srt?token=yyU1vwaX2Yxq1I7tJ61fvdARcrkp7cxtk-SnmGbGUKw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full18","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Where the Networks Are","session_uid":"v-full","session_youtube_ff_id":"gWWaEplNEMQ","session_youtube_ff_link":"https://youtu.be/gWWaEplNEMQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/uw_DXYjpu24&t=0h51m6s","sessions":["Where the Networks Are"],"time_stamp":"2024-10-18T13:18:00Z","title":"AdaMotif: Graph Simplification via Adaptive Motif Design","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1693","abstract":"We introduce a visual analysis method for multiple causal graphs with different outcome variables, namely, multi-outcome causal graphs. Multi-outcome causal graphs are important in healthcare for understanding multimorbidity and comorbidity. To support the visual analysis, we collaborated with medical experts to devise two comparative visualization techniques at different stages of the analysis process. First, a progressive visualization method is proposed for comparing multiple state-of-the-art causal discovery algorithms. The method can handle mixed-type datasets comprising both continuous and categorical variables and assist in the creation of a fine-tuned causal graph of a single outcome. Second, a comparative graph layout technique and specialized visual encodings are devised for the quick comparison of multiple causal graphs. In our visual analysis approach, analysts start by building individual causal graphs for each outcome variable, and then, multi-outcome causal graphs are generated and visualized with our comparative technique for analyzing differences and commonalities of these causal graphs. 
Evaluation includes quantitative measurements on benchmark datasets, a case study with a medical expert, and expert user studies with real-world health research data.","accessible_pdf":true,"authors":[{"affiliations":["Institute of Medical Technology, Peking University Health Science Center, Beijing, China","National Institute of Health Data Science, Peking University, Beijing, China"],"email":"mengjiefan@bjmu.edu.cn","is_corresponding":true,"name":"Mengjie Fan"},{"affiliations":["Chalmers University of Technology, Gothenburg, Sweden","Peking University, Beijing, China"],"email":"yu.jinlu@qq.com","is_corresponding":false,"name":"Jinlu Yu"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"weiskopf@visus.uni-stuttgart.de","is_corresponding":false,"name":"Daniel Weiskopf"},{"affiliations":["Tongji College of Design and Innovation, Shanghai, China"],"email":"nan.cao@gmail.com","is_corresponding":false,"name":"Nan Cao"},{"affiliations":["Beijing University of Chinese Medicine, Beijing, China"],"email":"wanghuaiyuelva@126.com","is_corresponding":false,"name":"Huaiyu Wang"},{"affiliations":["Peking University, Beijing, China"],"email":"zhoulng@pku.edu.cn","is_corresponding":false,"name":"Liang Zhou"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1693","image_caption":"The case study of the UK Biobank data with a medical expert using our method. In the first stage of \"single causal graph analysis\" (1\u20134), the expert explores and edits single causal graphs using the progressive comparative visualization of three state-of-the-art causal discovery techniques (2-4) in combination with her domain knowledge. In the second stage of \"multi-outcome causal graphs comparison\" (5, 6), she selects graphs of interested outcome for comparison using various layouts, including the supergraph (5), and our new comparable layout for subgraphs (6). ","keywords":["Causal graph visualization and visual analysis, causal discovery, comparative visualization, visual analysis in medicine"],"open_access_supplemental_link":"https://github.com/mengjiefan/multi_outcome/tree/vis_rev_sub","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.02679","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1693/v-full-1693_Preview.mp4?token=ovYGwzD9MYXQVpPzgVA8YPTLXY9HO1ombY8hfQ3Uh48&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full18","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Where the Networks Are","session_uid":"v-full","session_youtube_ff_id":"bu5PgW9Q6Kg","session_youtube_ff_link":"https://youtu.be/bu5PgW9Q6Kg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/uw_DXYjpu24&t=0h0m57s","sessions":["Where the Networks Are"],"time_stamp":"2024-10-18T12:30:00Z","title":"Visual Analysis of Multi-outcome Causal Graphs","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1746","abstract":"Hypergraphs provide a natural way to represent polyadic relationships in network data. For large hypergraphs, it is often difficult to visually detect structures within the data. 
Recently, a scalable polygon-based visualization approach was developed allowing hypergraphs with thousands of hyperedges to be simplified and examined at different levels of detail. However, this approach is not guaranteed to eliminate all of the visual clutter caused by unavoidable overlaps. Furthermore, meaningful structures can be lost at simplified scales, making their interpretation unreliable. In this paper, we define hypergraph structures using the bipartite graph representation, allowing us to decompose the hypergraph into a union of structures including topological blocks, bridges, and branches, and to identify exactly where unavoidable overlaps must occur. We also introduce a set of topology preserving and topology altering atomic operations, enabling the preservation of important structures while reducing unavoidable overlaps to improve visual clarity and interpretability in simplified scales. We demonstrate our approach in several real-world applications.","accessible_pdf":false,"authors":[{"affiliations":["Oregon State University, Corvallis, United States"],"email":"oliverpe@oregonstate.edu","is_corresponding":false,"name":"Peter D Oliver"},{"affiliations":["Oregon State University, Corvallis, United States"],"email":"zhange@eecs.oregonstate.edu","is_corresponding":false,"name":"Eugene Zhang"},{"affiliations":["Oregon State University, Corvallis, United States"],"email":"zhangyue@oregonstate.edu","is_corresponding":false,"name":"Yue Zhang"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1746","image_caption":"We present a structure-guided simplification scheme for hypergraphs. Given an input hypergraph (left), we identify a cycle basis for its bipartite graph representation (middle). Using the basis cycles, we decompose the hypergraph into a union of topological blocks (purple bubbles), bridges, and branches (green bubbles). We apply minimal cycle collapse and cycle cut simplifications to eliminate unavoidable overlaps in the topological blocks, and apply leaf pruning simplifications to reduce the space required by bridges and branches. 
Our simplification prioritizes preserving long cycles, bridges, and branches so that the most significant structures are kept in the simplified results (right).","keywords":["Hypergraph Visualization, Hypergraph Simplification, Hypergraph Topology, Bipartite Representation"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.19621","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1746/v-full-1746_Preview.mp4?token=ZRuLy-4QoGhRaO17UYqa07iEi1dYdlexH3G8P14LWxU&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full18","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Where the Networks Are","session_uid":"v-full","session_youtube_ff_id":"kP6irewadAE","session_youtube_ff_link":"https://youtu.be/kP6irewadAE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/uw_DXYjpu24&t=0h14m18s","sessions":["Where the Networks Are"],"time_stamp":"2024-10-18T12:42:00Z","title":"Structure-Aware Simplification for Hypergraph Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1831","abstract":"When using exploratory visual analysis to examine multivariate hierarchical data, users often need to query data to narrow down the scope of analysis. However, formulating effective query expressions remains a challenge for multivariate hierarchical data, particularly when datasets become very large. To address this issue, we develop a declarative grammar,HiRegEx (Hierarchical data Regular Expression), for querying and exploring multivariate hierarchical data. Rooted in the extended multi-level task topology framework for tree visualizations (e-MLTT), HiRegEx delineates three query targets (node, path, and subtree) and two aspects for querying these targets (features and positions), and uses operators developed based on classical regular expressions for query construction. Based on the HiRegEx grammar, we develop an exploratory framework for querying and exploring multivariate hierarchical data and integrate it into the TreeQueryER prototype system. The exploratory framework includes three major components: top-down pattern specification, bottom-up data-driven inquiry, and context-creation data overview. 
We validate the expressiveness of HiRegEx with the tasks from the e-MLTT framework and showcase the utility and effectiveness of the TreeQueryER system through a case study involving expert users in the analysis of a citation tree dataset.","accessible_pdf":false,"authors":[{"affiliations":["Beijing Institute of Technology, Beijing, China"],"email":"guozhg.li@gmail.com","is_corresponding":false,"name":"Guozheng Li"},{"affiliations":["Beijing Institute of Technology, Beijing, China"],"email":"haotian.mi1@gmail.com","is_corresponding":false,"name":"haotian mi"},{"affiliations":["Beijing Institute of Technology, Beijing, China"],"email":"liuchi02@gmail.com","is_corresponding":false,"name":"Chi Harold Liu"},{"affiliations":["Ochanomizu University, Tokyo, Japan"],"email":"itot@is.ocha.ac.jp","is_corresponding":false,"name":"Takayuki Itoh"},{"affiliations":["Beijing Institute of Technology, Beijing, China"],"email":"wanggrbit@126.com","is_corresponding":false,"name":"Guoren Wang"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1831","image_caption":"The exploratory framework for querying multivariate hierarchical data comprises three modes: top-down, bottom-up, and context-creation. The top-down mode starts from a clear query task. Users construct the corresponding query expression through direct manipulations interactively. The bottom-up mode recommends related query expressions based on the initial expression and the multivariate hierarchical data collection. The context-creation mode offers users an overview of the entire hierarchical data collection. Modules associated with the top-down, bottom-up, and context creation modes in the framework are denoted by red, orange, and blue triangles. ","keywords":["Multivariate hierarchical data, declarative grammar, visual query"],"open_access_supplemental_link":"https://github.com/bitvis2021/HiRegEx","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1831/v-full-1831_Preview.mp4?token=n9QVeFNmeHdmGxNyALHyOSelyM2wK96nkpNFEk5JK20&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1831/v-full-1831_Preview.srt?token=ZgipMgJhWQR58B78D58zTFJOIAOOffNjTYK6RJMyoOA&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full18","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Where the Networks Are","session_uid":"v-full","session_youtube_ff_id":"7q67dSgbZCI","session_youtube_ff_link":"https://youtu.be/7q67dSgbZCI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/uw_DXYjpu24&t=1h5m34s","sessions":["Where the Networks Are"],"time_stamp":"2024-10-18T13:30:00Z","title":"HiRegEx: Interactive Visual Query and Exploration of Multivariate Hierarchical Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233306356","abstract":"A multitude of studies have been conducted on graph drawing, but many existing methods only focus on optimizing a single aesthetic aspect of graph layouts. 
There are a few existing methods that attempt to develop a flexible solution for optimizing different aesthetic aspects measured by different aesthetic criteria. Furthermore, thanks to the significant advance in deep learning techniques, several deep learning-based layout methods were proposed recently, which have demonstrated the advantages of the deep learning approaches for graph drawing. However, none of these existing methods can be directly applied to optimizing non-differentiable criteria without special accommodation. In this work, we propose a novel Generative Adversarial Network (GAN) based deep learning framework for graph drawing, called SmartGD, which can optimize any quantitative aesthetic goals even though they are non-differentiable. In the cases where the aesthetic goal is too abstract to be described mathematically, SmartGD can draw graphs in a similar style as a collection of good layout examples, which might be selected by humans based on the abstract aesthetic goal. To demonstrate the effectiveness and efficiency of SmartGD, we conduct experiments on minimizing stress, minimizing edge crossing, maximizing crossing angle, and a combination of multiple aesthetics. Compared with several popular graph drawing algorithms, the experimental results show that SmartGD achieves good performance both quantitatively and qualitatively.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Xiaoqi Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Kevin Yen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yifan Hu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Han-Wei Shen"}],"award":"","doi":"10.1109/TVCG.2023.3306356","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233306356","image_caption":"SmartGD is a novel deep-learning framework for graph drawing, which can optimize any quantitative aesthetics. It is a GAN-based framework in which the generator learns to draw graphs, and the discriminator serves as a judge of the layout quality. Also, we introduce a unique self-challenging mechanism that continuously improves the quality of real layouts during training. 
Feel free to check our paper and code for more details.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233306356/v-tvcg-20233306356_Preview.mp4?token=4FeNN97BbtmlhvsZMB10GJ5NdeysTGNjtZGcEi2MjHk&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233306356/v-tvcg-20233306356_Preview.srt?token=e0TXzPdvcYtaKAftmwaAOpQueUFI2YNUVbmpC3fSPn8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full18","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Where the Networks Are","session_uid":"v-tvcg","session_youtube_ff_id":"o-j0BsCaXoU","session_youtube_ff_link":"https://youtu.be/o-j0BsCaXoU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/uw_DXYjpu24&t=0h38m53s","sessions":["Where the Networks Are"],"time_stamp":"2024-10-18T13:06:00Z","title":"SmartGD: A GAN-Based Graph Drawing Framework for Diverse Aesthetic Goals","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1395","abstract":"Onboarding a user to a visualization dashboard entails explaining its various components, including the chart types used, the data loaded, and the interactions available. Authoring such an onboarding experience is time-consuming and requires significant knowledge and little guidance on how best to complete this task. Depending on their levels of expertise, end users being onboarded to a new dashboard can be either confused and overwhelmed or disinterested and disengaged. We propose interactive dashboard tours (D-Tours) as semi-automated onboarding experiences that preserve the agency of users with various levels of expertise to keep them interested and engaged. Our interactive tours concept draws from open-world game design to give the user freedom in choosing their path through onboarding. We have implemented the concept in a tool called D-TOUR PROTOTYPE, which allows authors to craft custom interactive dashboard tours from scratch or using automatic templates. Automatically generated tours can still be customized to use different media (e.g., video, audio, and highlighting) or new narratives to produce an onboarding experience tailored to an individual user. We demonstrate the usefulness of interactive dashboard tours through use cases and expert interviews. Our evaluation shows that authors found the automation in the D-Tour Prototype helpful and time-saving, and users found the created tours engaging and intuitive. 
This paper and all supplemental materials are available at https://osf.io/6fbjp/.","accessible_pdf":false,"authors":[{"affiliations":["Pro2Future GmbH, Linz, Austria","Johannes Kepler University, Linz, Austria"],"email":"vaishali.dhanoa@pro2future.at","is_corresponding":true,"name":"Vaishali Dhanoa"},{"affiliations":["Johannes Kepler University, Linz, Austria"],"email":"andreas.hinterreiter@jku.at","is_corresponding":false,"name":"Andreas Hinterreiter"},{"affiliations":["Johannes Kepler University, Linz, Austria"],"email":"vanessa.fediuk@jku.at","is_corresponding":false,"name":"Vanessa Fediuk"},{"affiliations":["Aarhus University, Aarhus, Denmark"],"email":"elm@cs.au.dk","is_corresponding":false,"name":"Niklas Elmqvist"},{"affiliations":["Institute of Visual Computing "," Human-Centered Technology, Vienna, Austria"],"email":"groeller@cg.tuwien.ac.at","is_corresponding":false,"name":"Eduard Gr\u00f6ller"},{"affiliations":["Johannes Kepler University Linz, Linz, Austria"],"email":"marc.streit@jku.at","is_corresponding":false,"name":"Marc Streit"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1395","image_caption":"D-Tour Prototype Authoring Mode. Authors pick (a) automatically extracted visualization categories, General, Insight, or Interaction from the Content Extraction View and drag them to the Content Arrangement View, where they (b) arrange them, (b.1) thus crafting a tour and (b.2) adding explanations to the tour content. In the Dissemination View they (c) test changes before disseminating them. A selection of the Column Chart General in the Content Extraction View is shown which is highlighted in the Content Arrangement View and in the Dissemination View. Its associated content can be seen in (b.2)","keywords":["Dashboards, onboarding, storytelling, tutorial, interactive tours, open-world games"],"open_access_supplemental_link":"https://osf.io/6fbjp/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/t5m3u","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1395/v-full-1395_Preview.mp4?token=V1XLccn7O78RyqyQxzrfDBtx39_DAamtvybdh6UTnM0&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full19","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Recommendation","session_uid":"v-full","session_youtube_ff_id":"S6366DrJQTs","session_youtube_ff_link":"https://youtu.be/S6366DrJQTs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/NWNMgWnT7NM&t=0h37m24s","sessions":["Visualization Recommendation"],"time_stamp":"2024-10-17T13:18:00Z","title":"D-Tour: Semi-Automatic Generation of Interactive Guided Tours for Visualization Dashboard Onboarding","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1416","abstract":"Various data visualization applications such as reverse engineering and interactive authoring require a vocabulary that describes the structure of visualization scenes and the procedure to manipulate them. A few scene abstractions have been proposed, but they are restricted to specific applications for a limited set of visualization types. 
A unified and expressive model of data visualization scenes for different applications has been missing. To fill this gap, we present Manipulable Semantic Components (MSC), a computational representation of data visualization scenes, to support applications in scene understanding and augmentation. MSC consists of two parts: a unified object model describing the structure of a visualization scene in terms of semantic components, and a set of operations to generate and modify the scene components. We demonstrate the benefits of MSC in three applications: visualization authoring, visualization deconstruction and reuse, and animation specification.","accessible_pdf":false,"authors":[{"affiliations":["University of Maryland, College Park, United States"],"email":"leozcliu@umd.edu","is_corresponding":false,"name":"Zhicheng Liu"},{"affiliations":["University of Maryland, College Park, United States"],"email":"cchen24@umd.edu","is_corresponding":true,"name":"Chen Chen"},{"affiliations":["University of Maryland, College Park, United States"],"email":"hookerj100@gmail.com","is_corresponding":false,"name":"John Hooker"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1416","image_caption":"We present Manipulable Semantic Components (MSC), a computational representation of data visualization scenes. MSC consists of two parts: a unified object model describing the structure of a visualization scene, and a set of operations to generate and modify the scene components. We demonstrate the benefits of MSC in three case studies.","keywords":["data visualization, scene abstraction, visualization model"],"open_access_supplemental_link":"https://mascot-vis.github.io/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1416/v-full-1416_Preview.mp4?token=KRrGLYZSqDhpGaEVGwui2BSZujcpuzBKO2MyQZHZ4OU&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1416/v-full-1416_Preview.srt?token=A1hctGLK3p57jh9I2IfrtOvzUOMN5Es0ScMbfjuDbR4&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full19","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Recommendation","session_uid":"v-full","session_youtube_ff_id":"4IYhlRFnM64","session_youtube_ff_link":"https://youtu.be/4IYhlRFnM64","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/NWNMgWnT7NM&t=0h50m1s","sessions":["Visualization Recommendation"],"time_stamp":"2024-10-17T13:30:00Z","title":"Manipulable Semantic Components: a Computational Representation of Data Visualization Scenes","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1472","abstract":"Trained on vast corpora, Large Language Models (LLMs) have the potential to encode visualization design knowledge and best practices. However, if they fail to do so, they might provide unreliable visualization recommendations. What visualization design preferences, then, have LLMs learned? We contribute DracoGPT, a method for extracting, modeling, and assessing visualization design preferences from LLMs. 
To assess varied tasks, we develop two pipelines--DracoGPT-Rank and DracoGPT-Recommend--to model LLMs prompted to either rank or recommend visual encoding specifications. We use Draco as a shared knowledge base in which to represent LLM design preferences and compare them to best practices from empirical research. We demonstrate that DracoGPT can accurately model the preferences expressed by LLMs, enabling analysis in terms of Draco design constraints. Across a suite of backing LLMs, we find that DracoGPT-Rank and DracoGPT-Recommend moderately agree with each other, but both substantially diverge from guidelines drawn from human subjects experiments. Future work can build on our approach to expand Draco's knowledge base to model a richer set of preferences and to provide a robust and cost-effective stand-in for LLMs.","accessible_pdf":false,"authors":[{"affiliations":["University of Washington, Seattle, United States"],"email":"wwill@cs.washington.edu","is_corresponding":true,"name":"Huichen Will Wang"},{"affiliations":["University of Washington, Seattle, United States"],"email":"mgord@cs.stanford.edu","is_corresponding":false,"name":"Mitchell L. Gordon"},{"affiliations":["University of Washington, Seattle, United States"],"email":"leibatt@cs.washington.edu","is_corresponding":false,"name":"Leilani Battle"},{"affiliations":["University of Washington, Seattle, United States"],"email":"jheer@uw.edu","is_corresponding":false,"name":"Jeffrey Heer"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1472","image_caption":"DracoGPT is a method for extracting, modeling, and assessing visualization design preferences from LLMs. We develop two pipelines--DracoGPT-Rank and DracoGPT-Recommend--to model LLMs prompted to either rank or recommend visual encoding specifications. We use Draco as a shared knowledge base in which to represent LLM design preferences and compare them to best practices from empirical research. 
The image shown summarizes the pipeline for DracoGPT-Rank.","keywords":["Visualization, Large Language Models, Visualization Recommendation, Graphical Perception"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1472/v-full-1472_Preview.mp4?token=kwZ9vXZXTmgiZroZv-H25sDIQX0qQpBLBTsHkJ0-Xw8&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full19","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Recommendation","session_uid":"v-full","session_youtube_ff_id":"Y-lg3iu3-o4","session_youtube_ff_link":"https://youtu.be/Y-lg3iu3-o4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/NWNMgWnT7NM&t=0h13m11s","sessions":["Visualization Recommendation"],"time_stamp":"2024-10-17T12:42:00Z","title":"DracoGPT: Extracting Visualization Design Preferences from Large Language Models","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233316469","abstract":"Automated visualization recommendation facilitates the rapid creation of effective visualizations, which is especially beneficial for users with limited time and limited knowledge of data visualization. There is an increasing trend in leveraging machine learning (ML) techniques to achieve an end-to-end visualization recommendation. However, existing ML-based approaches implicitly assume that there is only one appropriate visualization for a specific dataset, which is often not true for real applications. Also, they often work like a black box, and are difficult for users to understand the reasons for recommending specific visualizations. To fill the research gap, we propose AdaVis, an adaptive and explainable approach to recommend one or multiple appropriate visualizations for a tabular dataset. It leverages a box embedding-based knowledge graph to well model the possible one-to-many mapping relations among different entities (i.e., data features, dataset columns, datasets, and visualization choices). The embeddings of the entities and relations can be learned from dataset-visualization pairs. Also, AdaVis incorporates the attention mechanism into the inference framework. Attention can indicate the relative importance of data features for a dataset and provide fine-grained explainability. Our extensive evaluations through quantitative metric evaluations, case studies, and user interviews demonstrate the effectiveness of AdaVis.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Songheng Zhang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yong Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Haotian Li"},{"affiliations":"","email":"","is_corresponding":false,"name":"Huamin Qu"}],"award":"","doi":"10.1109/TVCG.2023.3316469","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233316469","image_caption":"The figures show four pairs of visualizations recommended for four different datasets. Visualizations in the same column are for the same dataset. The explanation of the recommendation results is at the bottom. 
The top two features are described in the explanations to illustrate the recommendation results.","keywords":["Visualization Recommendation, Logical Reasoning, Data Visualization, Knowledge Graph"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233316469/v-tvcg-20233316469_Preview.mp4?token=wCwBEW6oJAqCGaF_7k5HslvCKP45i5gj1POdYH9qKtc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233316469/v-tvcg-20233316469_Preview.srt?token=djcHDOkGOsLAPdzbvC_3CO12Lcv1PjuJHjIGl_j0bV4&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full19","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Recommendation","session_uid":"v-tvcg","session_youtube_ff_id":"84XqN9j09X0","session_youtube_ff_link":"https://youtu.be/84XqN9j09X0","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/NWNMgWnT7NM&t=0h1m11s","sessions":["Visualization Recommendation"],"time_stamp":"2024-10-17T12:30:00Z","title":"AdaVis: Adaptive and Explainable Visualization Recommendation for Tabular Data'","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243374571","abstract":"Visualization Recommendation Systems (VRSs) are a novel and challenging field of study aiming to help generate insightful visualizations from data and support non-expert users in information discovery. Among the many contributions proposed in this area, some systems embrace the ambitious objective of imitating human analysts to identify relevant relationships in data and make appropriate design choices to represent these relationships with insightful charts. We denote these systems as \"agnostic\" VRSs since they do not rely on human-provided constraints and rules but try to learn the task autonomously. Despite the high application potential of agnostic VRSs, their progress is hindered by several obstacles, including the absence of standardized datasets to train recommendation algorithms, the difficulty of learning design rules, and defining quantitative criteria for evaluating the perceptual effectiveness of generated plots. This paper summarizes the literature on agnostic VRSs and outlines promising future research directions.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Luca Podo"},{"affiliations":"","email":"","is_corresponding":false,"name":"Bardh Prenkaj"},{"affiliations":"","email":"","is_corresponding":false,"name":"Paola Velardi"}],"award":"","doi":"10.1109/TVCG.2024.3374571","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243374571","image_caption":"Workflow of Agnostic Visual Recommender Systems (A-VRSs): First (Figure 1 up), the model is trained with data-visualization pairs, to learn both to identify relevant relationships between data and to visualize them in the best possible way. 
Next (Figure 1 down), the learned model recommends a set of possibly insightful visualizations from new datasets at inference time.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243374571/v-tvcg-20243374571_Preview.mp4?token=KLfl87EHjbGIpm2yPdsmIpwf2cM0nXpQgl2CbVHhXKA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243374571/v-tvcg-20243374571_Preview.srt?token=K44xpC8trhsmHcAEor-rRfBqY382d4LraLLQEK7M2gY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full19","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Recommendation","session_uid":"v-tvcg","session_youtube_ff_id":"qDYK_aAqIW8","session_youtube_ff_link":"https://youtu.be/qDYK_aAqIW8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/NWNMgWnT7NM&t=0h26m44s","sessions":["Visualization Recommendation"],"time_stamp":"2024-10-17T13:06:00Z","title":"Agnostic Visual Recommendation Systems: Open Challenges and Future Directions","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243383089","abstract":"The advances in AI-enabled techniques have accelerated the creation and automation of visualizations in the past decade. However, presenting visualizations in a descriptive and generative format remains a challenge. Moreover, current visualization embedding methods focus on standalone visualizations, neglecting the importance of contextual information for multi-view visualizations. To address this issue, we propose a new representation model, Chart2Vec, to learn a universal embedding of visualizations with context-aware information. Chart2Vec aims to support a wide range of downstream visualization tasks such as recommendation and storytelling. Our model considers both structural and semantic information of visualizations in declarative specifications. To enhance the context-aware capability, Chart2Vec employs multi-task learning on both supervised and unsupervised tasks concerning the cooccurrence of visualizations. We evaluate our method through an ablation study, a user study, and a quantitative comparison. The results verified the consistency of our embedding method with human cognition and showed its advantages over existing methods.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Qing Chen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ying Chen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ruishi Zou"},{"affiliations":"","email":"","is_corresponding":false,"name":"Wei Shuai"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yi Guo"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jiazhe Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Nan Cao"}],"award":"","doi":"10.1109/TVCG.2024.3383089","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243383089","image_caption":"To capture the information of a single visualization, we designed the Chart2Vec model. 
The input embedding module transforms the raw data into a vector format containing both fact schema and fact semantics, the encoder module then employs feature pooling and feature fusion to achieve the final vector representation. ","keywords":["Representation Learning, Multi-view Visualization, Visual Storytelling, Visualization Embedding"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2306.08304","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243383089/v-tvcg-20243383089_Preview.mp4?token=YaziDZQuh9K5a72_sHE8lgKpS45Ez70snplKQ4wqrAI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243383089/v-tvcg-20243383089_Preview.srt?token=f81u_zS7B44sxxCqfko0XEpX-AvGZ0OHaZCSIa6_lIg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full19","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Recommendation","session_uid":"v-tvcg","session_youtube_ff_id":"b1lGAY8V3S4","session_youtube_ff_link":"https://youtu.be/b1lGAY8V3S4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/NWNMgWnT7NM&t=0h13m19s","sessions":["Visualization Recommendation"],"time_stamp":"2024-10-17T12:54:00Z","title":"Chart2Vec: A Universal Embedding of Context-Aware Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1059","abstract":"Digital twin models are of high interest to Head and Neck Cancer (HNC) oncologists, who have to navigate a series of complex treatment decisions that weigh the efficacy of tumor control against toxicity and mortality risks. Evaluating individual risk profiles necessitates a deeper understanding of the interplay between different factors such as patient health, spatial tumor location and spread, and risk of subsequent toxicities that can not be adequately captured through simple heuristics. To support clinicians in better understanding tradeoffs when deciding on treatment courses, we developed DITTO, a digital-twin and visual computing system that allows clinicians to analyze detailed risk profiles for each patient, and decide on a treatment plan. DITTO relies on a sequential Deep Reinforcement Learning digital twin (DT) to deliver personalized risk of both long-term and short-term disease outcome and toxicity risk for HNC patients. Based on a participatory collaborative design alongside oncologists, we also implement several visual explainability methods to promote clinical trust and encourage healthy skepticism when using our system. We evaluate the efficacy of DITTO through quantitative evaluation of performance and case studies with qualitative feedback. 
Finally, we discuss design lessons for developing clinical visual XAI applications for clinical end users.","accessible_pdf":false,"authors":[{"affiliations":["University of Illinois at Chicago, Chicago, United States"],"email":"awentze2@uic.edu","is_corresponding":true,"name":"Andrew Wentzel"},{"affiliations":["University of Houston, Houston, United States"],"email":"skattia@mdanderson.org","is_corresponding":false,"name":"Serageldin Attia"},{"affiliations":["University of Illinois Chicago, Chicago, United States"],"email":"zhangz@uic.edu","is_corresponding":false,"name":"Xinhua Zhang"},{"affiliations":["University of Iowa, Iowa City, United States"],"email":"guadalupe-canahuate@uiowa.edu","is_corresponding":false,"name":"Guadalupe Canahuate"},{"affiliations":["University of Texas, Houston, United States"],"email":"cdfuller@mdanderson.org","is_corresponding":false,"name":"Clifton David Fuller"},{"affiliations":["University of Illinois at Chicago, Chicago, United States"],"email":"g.elisabeta.marai@gmail.com","is_corresponding":false,"name":"G. Elisabeta Marai"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1059","image_caption":"Overview of DITTO. (A) Input panel to alter model parameters and input patient features. (B) Temporal outcome risk plots for the patient based on different models and treatment groups. (C) Treatment recommendation based on the twin model and similar patients. (D) Auxiliary data panel, currently showing a waterfall plot of how each feature cumulatively contributes to the model decision.","keywords":["Medicine; Machine Learning; Application Domains; High Dimensional data; Spatial Data; Activity Centered Design"],"open_access_supplemental_link":"https://osf.io/qhu7f/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://doi.org/10.48550/arXiv.2407.13107","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1059/v-full-1059_Preview.mp4?token=36Loh-7OLpLaxVBBBoJ5FVLn6dJxoh991w5oHDdNbKQ&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full2","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Industry, Computing, and Medicine","session_uid":"v-full","session_youtube_ff_id":"4AmQkVSrVdE","session_youtube_ff_link":"https://youtu.be/4AmQkVSrVdE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/0TNqponA2lk&t=1h3m8s","sessions":["Applications: Industry, Computing, and Medicine"],"time_stamp":"2024-10-17T18:45:00Z","title":"DITTO: A Visual Digital Twin for Interventions and Temporal Treatment Outcomes in Head and Neck Cancer","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1805","abstract":"The optimization of cooling systems is important in many cases, for example for cabin and battery cooling in electric cars. Such an optimization is governed by multiple, conflicting objectives and it is performed across a multi-dimensional parameter space. The extent of the parameter space, the complexity of the non-linear model of the system, as well as the time needed per simulation run and factors that are not modeled in the simulation necessitate an iterative, semi-automatic approach. 
We present an interactive visual optimization approach, where the user works with a p-h diagram to steer an iterative, guided optimization process. A deep learning (DL) model provides estimates for parameters, given a target characterization of the system, while numerical simulation is used to compute system characteristics for an ensemble of parameter sets. Since the DL model only serves as an approximation of the inverse of the cooling system and since target characteristics can be chosen according to different, competing objectives, an iterative optimization process is realized, developing multiple sets of intermediate solutions, which are visually related to each other. The standard p-h diagram, integrated interactively in this approach, is complemented by a dual, also interactive visual representation of additional expressive measures representing the system characteristics. We show how the known four-points semantic of the p-h diagram meaningfully transfers to the dual data representation. When evaluating this approach in the automotive domain, we found that our solution helped with the overall comprehension of the cooling system and that it led to faster convergence during optimization. ","accessible_pdf":false,"authors":[{"affiliations":["VRVis Research Center, Vienna, Austria"],"email":"splechtna@vrvis.at","is_corresponding":true,"name":"Rainer Splechtna"},{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"behravan@vt.edu","is_corresponding":false,"name":"Majid Behravan"},{"affiliations":["AVL AST doo, Zagreb, Croatia"],"email":"mario.jelovic@avl.com","is_corresponding":false,"name":"Mario Jelovic"},{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"gracanin@vt.edu","is_corresponding":false,"name":"Denis Gracanin"},{"affiliations":["University of Bergen, Bergen, Norway"],"email":"helwig.hauser@uib.no","is_corresponding":false,"name":"Helwig Hauser"},{"affiliations":["VRVis Research Center, Vienna, Austria"],"email":"matkovic@vrvis.at","is_corresponding":false,"name":"Kresimir Matkovic"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1805","image_caption":"The interactive p-h diagram, central to interactive design of experiments for cooling systems, presents multiple layers of information: user-defined desired points (in shades of red), simulated points generated by parameters predicted through deep learning (shades of blue), and scatterplots offering a dual data perspective (with lines connecting Deep Learning prediction and simulation for the same parameters). 
","keywords":["Parameter space exploration"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.12607","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1805/v-full-1805_Preview.mp4?token=vbIksADqVloSgv_7CEcnLkzX0sf30vsoN5mp0KBDjH0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1805/v-full-1805_Preview.srt?token=4CbReupRi8r5i9I_rOR3J4R7GGd_rHw_7goN2_bpA14&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full2","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Industry, Computing, and Medicine","session_uid":"v-full","session_youtube_ff_id":"zGpaBxAqkHw","session_youtube_ff_link":"https://youtu.be/zGpaBxAqkHw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/0TNqponA2lk&t=0h38m51s","sessions":["Applications: Industry, Computing, and Medicine"],"time_stamp":"2024-10-17T18:21:00Z","title":"Interactive Design-of-Experiments: Optimizing a Cooling System","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1865","abstract":"In medical diagnostics of both early disease detection and routine patient care, particle-based contamination of in-vitro diagnostics consumables poses a significant threat to patients. Objective data-driven decision-making on the severity of contamination is key for reducing patient risk, while saving time and cost in quality assessment. Our collaborators introduced us to their quality control process, including particle data acquisition through image recognition, feature extraction, and attributes reflecting the production context of particles. Shortcomings in the current process are limitations in exploring thousands of images, data-driven decision making, and ineffective knowledge externalization. Following the design study methodology, our contributions are a characterization of the problem space and requirements, the development and validation of DaedalusData, a comprehensive discussion of our study's learnings, and a generalizable framework for knowledge externalization. DaedalusData is a visual analytics system that enables domain experts to explore particle contamination patterns, label particles in label alphabets, and externalize knowledge through semi-supervised label-informed data projections. The results of our case study and user study show high usability of DaedalusData and its efficient support of experts in generating comprehensive overviews of thousands of particles, labeling of large quantities of particles, and externalizing knowledge to augment the dataset further. 
Reflecting on our approach, we discuss insights on dataset augmentation via human knowledge externalization, and on the scalability and trade-offs that come with the adoption of this approach in practice.","accessible_pdf":false,"authors":[{"affiliations":["Roche pRED, Basel, Switzerland","University of Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"alexander.wyss@protonmail.com","is_corresponding":false,"name":"Alexander Wyss"},{"affiliations":["University of Zurich, Zurich, Switzerland","Digital Society Initiativ, Zurich, Switzerland"],"email":"gab.morgenshtern@gmail.com","is_corresponding":false,"name":"Gabriela Morgenshtern"},{"affiliations":["Roche Diagnostics International, Rotkreuz, Switzerland"],"email":"a.hirschhuesler@gmail.com","is_corresponding":false,"name":"Amanda Hirsch-H\u00fcsler"},{"affiliations":["University of Zurich, Zurich, Switzerland","Digital Society Initiativ, Zurich, Switzerland"],"email":"bernard@ifi.uzh.ch","is_corresponding":false,"name":"J\u00fcrgen Bernard"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1865","image_caption":"The DaedalusData framework supports two control modes for experts to steer the particle display with, shown here as a 2 \u00d7 2 matrix. Vertical: Experts choose between the Attribute View (for one attribute) and the Projection View (for multiple user-specified attributes) to identify areas of interest, and discover similar particles to label. Horizontal: Experts choose to explore either the Pre-Existing Data Attributes (the Image & Production Context), or to extend the exploration to Augmented Data Attributes created through particle labeling (Expert Knowledge). This design study implements a systematic cross-cut of all four types of control, addressing expert-contributed design requirements. 
","keywords":["Visual Analytics, Image Data, Knowledge Externalization, Data Labeling, Anomaly Detection, Medical Manufacturing"],"open_access_supplemental_link":"https://github.com/alexv710/DaedalusData---IEEE-VIS-Supplemental","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.04749","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1865/v-full-1865_Preview.mp4?token=0EQEAR0eBMnTqI4vpUWiBIPLB73GJ3Ed4bw9fMtXw0A&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1865/v-full-1865_Preview.srt?token=CqKfM7RMtFtdxiCe3pzjkNZ41jjLObsXxcnKWjbAfLI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full2","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Industry, Computing, and Medicine","session_uid":"v-full","session_youtube_ff_id":"TUuS_IaBoRg","session_youtube_ff_link":"https://youtu.be/TUuS_IaBoRg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/0TNqponA2lk&t=0h50m52s","sessions":["Applications: Industry, Computing, and Medicine"],"time_stamp":"2024-10-17T18:33:00Z","title":"DaedalusData: Exploration, Knowledge Externalization and Labeling of Particles in Medical Manufacturing - A Design Study","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233332999","abstract":"Quantum computing offers significant speedup compared to classical computing, which has led to a growing interest among users in learning and applying quantum computing across various applications. However, quantum circuits, which are fundamental for implementing quantum algorithms, can be challenging for users to understand due to their underlying logic, such as the temporal evolution of quantum states and the effect of quantum amplitudes on the probability of basis quantum states. To fill this research gap, we propose QuantumEyes, an interactive visual analytics system to enhance the interpretability of quantum circuits through both global and local levels. For the global-level analysis, we present three coupled visualizations to delineate the changes of quantum states and the underlying reasons: a Probability Summary View to overview the probability evolution of quantum states; a State Evolution View to enable an in-depth analysis of the influence of quantum gates on the quantum states; a Gate Explanation View to show the individual qubit states and facilitate a better understanding of the effect of quantum gates. For the local-level analysis, we design a novel geometrical visualization dandelion chart to explicitly reveal how the quantum amplitudes affect the probability of the quantum state. We thoroughly evaluated QuantumEyes as well as the novel dandelion chart integrated into it through two case studies on different types of quantum algorithms and in-depth expert interviews with 12 domain experts. 
The results demonstrate the effectiveness and usability of our approach in enhancing the interpretability of quantum circuits.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Shaolun Ruan"},{"affiliations":"","email":"","is_corresponding":false,"name":"Qiang Guan"},{"affiliations":"","email":"","is_corresponding":false,"name":"Paul Griffin"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ying Mao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yong Wang"}],"award":"","doi":"10.1109/TVCG.2023.3332999","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233332999","image_caption":"We propose QuantumEyes, an interactive visualization system to enhance the interpretability of general quantum circuits, with the integration of a visual design called Dandelion Chart to explain the quantum states regarding the probability and amplitudes of each basis states.","keywords":["Data visualization, design study, interpretability, quantum computing."],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2311.07980","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233332999/v-tvcg-20233332999_Preview.mp4?token=KYsEFgeyQN1AgvlKSQJWtfU9VAyZfND8R6jfQJvuAe8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233332999/v-tvcg-20233332999_Preview.srt?token=IKfsu3IkAuw3dU6k3iBiR4xWulNiC-UvBTMwRRQGTXo&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full2","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Industry, Computing, and Medicine","session_uid":"v-tvcg","session_youtube_ff_id":"SPYRqbzGtdA","session_youtube_ff_link":"https://youtu.be/SPYRqbzGtdA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/0TNqponA2lk&t=0h14m2s","sessions":["Applications: Industry, Computing, and Medicine"],"time_stamp":"2024-10-17T17:57:00Z","title":"QuantumEyes: Towards Better Interpretability of Quantum Circuits","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233337173","abstract":"Visualization design studies bring together visualization researchers and domain experts to address yet unsolved data analysis challenges stemming from the needs of the domain experts. Typically, the visualization researchers lead the design study process and implementation of any visualization solutions. This setup leverages the visualization researchers' knowledge of methodology, design, and programming, but the availability to synchronize with the domain experts can hamper the design process. We consider an alternative setup where the domain experts take the lead in the design study, supported by the visualization experts. In this study, the domain experts are computer architecture experts who simulate and analyze novel computer chip designs. These chips rely on a Network-on-Chip (NOC) to connect components. The experts want to understand how the chip designs perform and what in the design led to their performance. 
To aid this analysis, we develop Vis4Mesh, a visualization system that provides spatial, temporal, and architectural context to simulated NOC behavior. Integration with an existing computer architecture visualization tool enables architects to perform deep-dives into specific architecture component behavior. We validate Vis4Mesh through a case study and a user study with computer architecture researchers. We reflect on our design and process, discussing advantages, disadvantages, and guidance for engaging in domain expert-led design studies.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Shaoyu Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Hang Yan"},{"affiliations":"","email":"","is_corresponding":false,"name":"Katherine E. Isaacs"},{"affiliations":"","email":"","is_corresponding":true,"name":"Yifan Sun"}],"award":"","doi":"10.1109/TVCG.2023.3337173","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233337173","image_caption":"Vis4Mesh is a tool that allows computer architects to find the architectural cause of performance issues on a Network-on-Chip system.","keywords":["Data Visualization, Design Study, Network-on-Chip, Performance Analysis"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://www.researchgate.net/publication/376004885_Visual_Exploratory_Analysis_for_Designing_Large-Scale_Network-on-Chip_Architectures_A_Domain_Expert-Led_Design_Study","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233337173/v-tvcg-20233337173_Preview.mp4?token=nO7EurtSmn1LF4IwC6akfQu2SLnIRyYXJaFKpQAU-4U&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233337173/v-tvcg-20233337173_Preview.srt?token=EAjXUyLl1KOQBaHfR23V63VuNlLF4OB3xb9u1pCm0iY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full2","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Industry, Computing, and Medicine","session_uid":"v-tvcg","session_youtube_ff_id":"BqQmgA_KYII","session_youtube_ff_link":"https://youtu.be/BqQmgA_KYII","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/0TNqponA2lk&t=0h0m45s","sessions":["Applications: Industry, Computing, and Medicine"],"time_stamp":"2024-10-17T17:45:00Z","title":"Visual Exploratory Analysis for Designing Large-Scale Network-on-Chip Architectures: A Domain Expert-Led Design Study","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243382607","abstract":"Advanced manufacturing creates increasingly complex objects with material compositions that are often difficult to characterize by a single modality. Our domain scientists are going beyond traditional methods by employing both X-ray and neutron computed tomography to obtain complementary representations expected to better resolve material boundaries. However, the use of two modalities creates its own challenges for visualization, requiring either complex adjustments of multimodal transfer functions or the need for multiple views. 
Together with experts in nondestructive evaluation, we designed a novel interactive multimodal visualization approach to create a combined view of the co-registered X-ray and neutron acquisitions of industrial objects. Using an automatic topological segmentation of the bivariate histogram of X-ray and neutron values as a starting point, the system provides a simple yet effective interface to easily create, explore, and adjust a multimodal visualization. We propose a widget with simple brushing interactions that enables the user to quickly correct the segmented histogram results. Our semiautomated system enables domain experts to intuitively explore large multimodal datasets without the need for either advanced segmentation algorithms or knowledge of visualization techniques. We demonstrate our approach using synthetic examples, industrial phantom objects created to stress multimodal scanning techniques, and real-world objects, and we discuss expert feedback.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Huang, Xuan"},{"affiliations":"","email":"","is_corresponding":false,"name":"Miao, Haichao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Kim, Hyojin"},{"affiliations":"","email":"","is_corresponding":false,"name":"Townsend, Andrew"},{"affiliations":"","email":"","is_corresponding":false,"name":"Champley, Kyle"},{"affiliations":"","email":"","is_corresponding":false,"name":"Tringe, Joseph"},{"affiliations":"","email":"","is_corresponding":false,"name":"Pascucci, Valerio"},{"affiliations":"","email":"","is_corresponding":false,"name":"Bremer, Peer-Timo"}],"award":"","doi":"10.1109/TVCG.2024.3382607","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243382607","image_caption":"The X-Ray and neutron computed tomography industrial object XR05, consisting of multiple materials and intrinsic structures. 
With a morse-complex based segmentation (bottom left) on the bivariate histogram combing two modalities (top left), we present an efficient yet flexible system for examining material compositions (right).","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.11957","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243382607/v-tvcg-20243382607_Preview.mp4?token=tXtWD-Fz0i8u7eIfUdgouiGgwZ7L-nJDcmSmrOsmXGY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243382607/v-tvcg-20243382607_Preview.srt?token=aqpJ-RnPiFguRY97IwuBpLjy2Yk82W6DC49AwGefdOI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full2","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Industry, Computing, and Medicine","session_uid":"v-tvcg","session_youtube_ff_id":"sGc9lmaxeHI","session_youtube_ff_link":"https://youtu.be/sGc9lmaxeHI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/0TNqponA2lk&t=0h27m7s","sessions":["Applications: Industry, Computing, and Medicine"],"time_stamp":"2024-10-17T18:09:00Z","title":"Bimodal Visualization of Industrial X-ray and Neutron Computed Tomography Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1100","abstract":"``Correlation does not imply causation'' is a famous mantra in statistical and visual analysis. However, consumers of visualizations often draw causal conclusions when only correlations between variables are shown. In this paper, we investigate factors that contribute to causal relationships users perceive in visualizations. We collected a corpus of concept pairs from variables in widely used datasets and created visualizations that depict varying correlative associations using three typical statistical chart types. We conducted two MTurk studies on (1) preconceived notions on causal relations without charts, and (2) perceived causal relations with charts, for each concept pair. Our results indicate that people make assumptions about causal relationships between pairs of concepts even without seeing any visualized data. Moreover, our results suggest that these assumptions constitute causal priors that, in combination with visualized association, impact how data visualizations are interpreted. The results also suggest that causal priors may lead to over- or under-estimation in perceived causal relations in different circumstances, and that those priors can also impact users' confidence in their causal assessments. In addition, our results align with prior work, indicating that chart type may also affect causal inference. Using data from the studies, we develop a model to capture the interaction between causal priors and visualized associations as they combine to impact a user's perceived causal relations. In addition to reporting the study results and analyses, we provide an open dataset of causal priors for 56 specific concept pairs that can serve as a potential benchmark for future studies. 
We also suggest remaining challenges and heuristic-based guidelines to help designers improve visualization design choices to better support visual causal inference.","accessible_pdf":false,"authors":[{"affiliations":["University of North Carolina-Chapel Hill, Chapel Hill, United States"],"email":"zeyuwang@cs.unc.edu","is_corresponding":true,"name":"Arran Zeyu Wang"},{"affiliations":["UNC-Chapel Hill, Chapel Hill, United States"],"email":"borland@renci.org","is_corresponding":false,"name":"David Borland"},{"affiliations":["Davidson College, Davidson, United States"],"email":"tapeck@davidson.edu","is_corresponding":false,"name":"Tabitha C. Peck"},{"affiliations":["University of North Carolina, Chapel Hill, United States"],"email":"vaapad@live.unc.edu","is_corresponding":false,"name":"Wenyuan Wang"},{"affiliations":["University of North Carolina, Chapel Hill, United States"],"email":"gotz@unc.edu","is_corresponding":false,"name":"David Gotz"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1100","image_caption":"Results of participant-rated causal relationships for 56 concept pairs from open-source datasets. Participants rated the causal impact of X on Y for each pair on a scale of 1 to 5. The Y-axis in (a) shows these scores, ordered by mean causal relation on the X-axis with 95% confidence intervals. The light blue band represents the mean score +/- one standard deviation (SD). Vertical dashed lines indicate low (mean+SD) causal priors. (b) presents heat maps for four example pairs, showing participant scores. The study highlights the variability in causal priors and their impact on visualization interpretation.","keywords":["Causal inference, Perception and cognition, Causal prior, Association, Causality, Visualization"],"open_access_supplemental_link":"https://osf.io/dfkv4/?view_only=f84ffbc28cdf45e5a3d68f2f1e9c8427","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1100/v-full-1100_Preview.mp4?token=-tczNKjms6dku05m_c2tjjPtjHG0S_SDlRk3Z_2yHXI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1100/v-full-1100_Preview.srt?token=ovnAtbhGlSfYKYC6CnnCq5286IIhpEuvR4Hq-UFGsoI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full20","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Judgment and Decision-making","session_uid":"v-full","session_youtube_ff_id":"-9MypSwTv8w","session_youtube_ff_link":"https://youtu.be/-9MypSwTv8w","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/rSvwf4L8jPc&t=0h36m56s","sessions":["Judgment and Decision-making"],"time_stamp":"2024-10-16T14:51:00Z","title":"Causal Priors and Their Influence on Judgements of Causality in Visualized Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1202","abstract":"The Dunning-Kruger Effect (DKE) is a metacognitive phenomenon where low-skilled individuals tend to overestimate their competence while high-skilled individuals tend to underestimate their competence. This effect has been observed in a number of domains including humor, grammar, and logic. 
In this paper, we explore if and how DKE manifests in visual reasoning and judgment tasks. Across two online user studies involving (1) a sliding puzzle game and (2) a scatterplot-based categorization task, we demonstrate that individuals are susceptible to DKE in visual reasoning and judgment tasks: those who performed best underestimated their performance, while bottom performers overestimated their performance. In addition, we contribute novel analyses that correlate susceptibility of DKE with personality traits and user interactions. Our findings pave the way for novel modes of bias detection via interaction patterns and establish promising directions towards interventions tailored to an individual\u2019s personality traits. All materials and analyses are in supplemental materials: https://github.com/CAV-Lab/DKE_supplemental.git.","accessible_pdf":false,"authors":[{"affiliations":["Emory University, Atlanta, United States"],"email":"mengyu.chen@emory.edu","is_corresponding":true,"name":"Mengyu Chen"},{"affiliations":["Emory University, Atlanta, United States"],"email":"yijun.liu2@emory.edu","is_corresponding":false,"name":"Yijun Liu"},{"affiliations":["Emory University, Atlanta, United States"],"email":"emily.wall@emory.edu","is_corresponding":false,"name":"Emily Wall"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1202","image_caption":"We replicated the Dunning-Kruger Effect (DKE) across tasks involving visual reasoning and judgment. We observed a typical DKE pattern, where highly skilled people tend to underestimate their performance, while those with lower skills often overestimate it. Additionally, we explored potential indicators of DKE, including participants\u2019 interactions, personality traits, and domain familiarity, and identified several factors related to DKE.","keywords":["Cognitive Bias, Dunning Kruger Effect, Metacognition, Personality Traits, Interactions, Visual Reasoning"],"open_access_supplemental_link":"https://github.com/CAV-Lab/DKE_supplemental.git","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1202/v-full-1202_Preview.mp4?token=1Usg6-YU7WtR6uxtp0I104Kebbawx3X1E4TIwbqbnMI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1202/v-full-1202_Preview.srt?token=nKO3RkoaNQ_z7MQ987vVLVedXUQgyfEKL8JUDHrbopY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full20","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Judgment and Decision-making","session_uid":"v-full","session_youtube_ff_id":"-paNXRpqH1E","session_youtube_ff_link":"https://youtu.be/-paNXRpqH1E","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/rSvwf4L8jPc&t=0h13m38s","sessions":["Judgment and Decision-making"],"time_stamp":"2024-10-16T14:27:00Z","title":"Unmasking Dunning-Kruger Effect in Visual Reasoning and Visual Data Analysis","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1256","abstract":"People commonly utilize visualizations not only to examine a given dataset, but also to draw generalizable conclusions about the underlying models or phenomena. 
Prior research has compared human visual inference to that of an optimal Bayesian agent, with deviations from rational analysis viewed as problematic. However, human reliance on non-normative heuristics may prove advantageous in certain circumstances. We investigate scenarios where human intuition might surpass idealized statistical rationality. In two experiments, we examine individuals\u2019 accuracy in characterizing the parameters of known data-generating models from bivariate visualizations. Our findings indicate that, although participants generally exhibited lower accuracy compared to statistical models, they frequently outperformed Bayesian agents, particularly when faced with extreme samples. Participants appeared to rely on their internal models to filter out noisy visualizations, thus improving their resilience against spurious data. However, participants displayed overconfidence and struggled with uncertainty estimation. They also exhibited higher variance than statistical machines. Our findings suggest that analyst gut reactions to visualizations may provide an advantage, even when departing from rationality. These results carry implications for designing visual analytics tools, offering new perspectives on how to integrate statistical models and analyst intuition for improved inference and decision-making. The data and materials for this paper are available at https://osf.io/qmfv6","accessible_pdf":false,"authors":[{"affiliations":["Indiana University, Indianapolis, United States"],"email":"rkoonch@iu.edu","is_corresponding":true,"name":"Ratanond Koonchanok"},{"affiliations":["Argonne National Laboratory, Lemont, United States","University of Illinois Chicago, Chicago, United States"],"email":"papka@anl.gov","is_corresponding":false,"name":"Michael E. Papka"},{"affiliations":["Indiana University, Indianapolis, United States"],"email":"redak@iu.edu","is_corresponding":false,"name":"Khairi Reda"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1256","image_caption":"In this paper, we compare the ability of humans and statistical models to characterize the mean and uncertainty of the data-generating model based on visualized samples. Our results indicate that humans can outperform statistical models when faced with extreme samples. 
","keywords":["Visual inference, statistical rationality, human-machine collaboration"],"open_access_supplemental_link":"https://osf.io/qmfv6","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.16871","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1256/v-full-1256_Preview.mp4?token=ilWiqf4xF0Ne1wq9zXyw97jLPCm2BTpcsVHohWb1yEI&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full20","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Judgment and Decision-making","session_uid":"v-full","session_youtube_ff_id":"bj8YXso5ly0","session_youtube_ff_link":"https://youtu.be/bj8YXso5ly0","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/rSvwf4L8jPc&t=0h26m41s","sessions":["Judgment and Decision-making"],"time_stamp":"2024-10-16T14:39:00Z","title":"Trust Your Gut: Comparing Human and Machine Inference from Noisy Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233326698","abstract":"Researchers have derived many theoretical models for specifying users\u2019 insights as they interact with a visualization system. These representations are essential for understanding the insight discovery process, such as when inferring user interaction patterns that lead to insight or assessing the rigor of reported insights. However, theoretical models can be difficult to apply to existing tools and user studies, often due to discrepancies in how insight and its constituent parts are defined. This paper calls attention to the consistent structures that recur across the visualization literature and describes how they connect multiple theoretical representations of insight. We synthesize a unified formalism for insights using these structures, enabling a wider audience of researchers and developers to adopt the corresponding models. Through a series of theoretical case studies, we use our formalism to compare and contrast existing theories, revealing interesting research challenges in reasoning about a user's domain knowledge and leveraging synergistic approaches in data mining and data management research.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Leilani Battle"},{"affiliations":"","email":"","is_corresponding":false,"name":"Alvitta Ottley"}],"award":"","doi":"10.1109/TVCG.2023.3326698","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233326698","image_caption":"Inspired by existing definitions of insight, we present a unifying theory for the structure of insights discovered during visual analysis. The key idea is that an insight links analytic knowledge uncovered through data transformations/visualizations with the user's external domain knowledge. This core insight structure can then be adapted to form more complex insights, such as through further linking and nesting of existing insight objects. 
Informed by this theory, we contribute a toolkit named Pyxis for specifying insights in JavaScript code as well as motivating usage scenarios for Pyxis to advance future visualization theory, systems, and user studies.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233326698/v-tvcg-20233326698_Preview.mp4?token=obF0SbCwEZB17AEIa4zjAvqXcFfYXzOO1YmYzKe5g-4&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full20","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Judgment and Decision-making","session_uid":"v-tvcg","session_youtube_ff_id":"pih94nB6Mc4","session_youtube_ff_link":"https://youtu.be/pih94nB6Mc4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/rSvwf4L8jPc&t=1h2m50s","sessions":["Judgment and Decision-making"],"time_stamp":"2024-10-16T15:15:00Z","title":"What Do We Mean When We Say \u201cInsight\u201d? A Formal Synthesis of Existing Theory","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233346640","abstract":"Is it true that if citizens understand hurricane probabilities, they will make more rational decisions for evacuation? Finding answers to such questions is not straightforward in the literature because the terms \u201c judgment \u201d and \u201c decision making \u201d are often used interchangeably. This terminology conflation leads to a lack of clarity on whether people make suboptimal decisions because of inaccurate judgments of information conveyed in visualizations or because they use alternative yet currently unknown heuristics. To decouple judgment from decision making, we review relevant concepts from the literature and present two preregistered experiments (N=601) to investigate if the task (judgment vs. decision making), the scenario (sports vs. humanitarian), and the visualization (quantile dotplots, density plots, probability bars) affect accuracy. While experiment 1 was inconclusive, we found evidence for a difference in experiment 2. Contrary to our expectations and previous research, which found decisions less accurate than their direct-equivalent judgments, our results pointed in the opposite direction. Our findings further revealed that decisions were less vulnerable to status-quo bias, suggesting decision makers may disfavor responses associated with inaction. We also found that both scenario and visualization types can influence people's judgments and decisions. Although effect sizes are not large and results should be interpreted carefully, we conclude that judgments cannot be safely used as proxy tasks for decision making, and discuss implications for visualization research and beyond. 
Materials and preregistrations are available at https://osf.io/ufzp5/?view_only=adc0f78a23804c31bf7fdd9385cb264f.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Ba\u015fak Oral"},{"affiliations":"","email":"","is_corresponding":false,"name":"Pierre Dragicevic"},{"affiliations":"","email":"","is_corresponding":false,"name":"Alexandru Telea"},{"affiliations":"","email":"","is_corresponding":false,"name":"Evanthia Dimara"}],"award":"","doi":"10.1109/TVCG.2023.3346640","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233346640","image_caption":"The image shows a scale with a large question mark in the center, asking whether the two concepts are the same: 'Judgment,' symbolized by a magnifying glass on the left side, and 'Decision,' symbolized by a checklist on the right side.","keywords":["Data visualization, Task analysis, Decision making, Visualization, Bars, Sports, Terminology, Cognition, Decision Making, Judgment, Psychology, Visualization"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://hal.science/hal-04354869/file/OralDecoupling.pdf","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233346640/v-tvcg-20233346640_Preview.mp4?token=bFPCtI--QDmxBpAxwoJ95iAO_SLRw8i-Gmi9ezsctD4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233346640/v-tvcg-20233346640_Preview.srt?token=HQjTPEwxtlkgdZ20Y3hgLGQPO1SjBmvRuq8ERFTRpVU&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full20","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Judgment and Decision-making","session_uid":"v-tvcg","session_youtube_ff_id":"GojbFqP_xqs","session_youtube_ff_link":"https://youtu.be/GojbFqP_xqs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/rSvwf4L8jPc&t=0h0m48s","sessions":["Judgment and Decision-making"],"time_stamp":"2024-10-16T14:15:00Z","title":"Decoupling Judgment and Decision Making: A Tale of Two Tails","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233346713","abstract":"Recent growth in the popularity of large language models has led to their increased usage for summarizing, predicting, and generating text, making it vital to help researchers and engineers understand how and why they work. We present KnowledgeVIS , a human-in-the-loop visual analytics system for interpreting language models using fill-in-the-blank sentences as prompts. By comparing predictions between sentences, KnowledgeVIS reveals learned associations that intuitively connect what language models learn during training to natural language tasks downstream, helping users create and test multiple prompt variations, analyze predicted words using a novel semantic clustering technique, and discover insights using interactive visualizations. Collectively, these visualizations help users identify the likelihood and uniqueness of individual predictions, compare sets of predictions between prompts, and summarize patterns and relationships between predictions across all prompts. 
We demonstrate the capabilities of KnowledgeVIS with feedback from six NLP experts as well as three different use cases: (1) probing biomedical knowledge in two domain-adapted models; and (2) evaluating harmful identity stereotypes and (3) discovering facts and relationships between three general-purpose models.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Adam Coscia"},{"affiliations":"","email":"","is_corresponding":false,"name":"Alex Endert"}],"award":"","doi":"10.1109/TVCG.2023.3346713","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233346713","image_caption":"Evaluating generative LLMs for stereotypes and biases is hard. Fill-in-the-blank sentences as prompts can reveal biases, yet many fill-in-the-blank analysis methods are limited to one sentence at a time. Our solution, KnowledgeVIS, makes it easy to create multiple sentence prompts, then visually compare LLM predictions across sentences. We studied how KnowledgeVIS helps developers close the loop of LLM evaluation and contribute guidelines for improving human-in-the-loop NLP. KnowledgeVIS is open-source and live at: https://github.com/AdamCoscia/KnowledgeVIS. For the full story, please read our paper!","keywords":["Visual analytics, language models, prompting, interpretability, machine learning."],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/pdf/2403.04758","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233346713/v-tvcg-20233346713_Preview.mp4?token=f6g5iPvFyz17qSg3QF89xpvfwdqLRXE7iBD5fJRN3Vc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233346713/v-tvcg-20233346713_Preview.srt?token=s6wQXlGhwA-bODTFxNuCgLFi6qsuY_1vOZA_vJAaRZ0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full20","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Judgment and Decision-making","session_uid":"v-tvcg","session_youtube_ff_id":"OhiCpSl5jgs","session_youtube_ff_link":"https://youtu.be/OhiCpSl5jgs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/rSvwf4L8jPc&t=0h50m32s","sessions":["Judgment and Decision-making"],"time_stamp":"2024-10-16T15:03:00Z","title":"KnowledgeVIS: Interpreting Language Models by Comparing Fill-in-the-Blank Prompts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1142","abstract":"To deploy machine learning models on-device, practitioners use compression algorithms to shrink and speed up models while maintaining their high-quality output. A critical aspect of compression in practice is model comparison, including tracking many compression experiments, identifying subtle changes in model behavior, and negotiating complex accuracy-efficiency trade-offs. However, existing compression tools poorly support comparison, leading to tedious and, sometimes, incomplete analyses spread across disjoint tools. To support real-world comparative workflows, we develop an interactive visual system called Compress and Compare. 
Within a single interface, Compress and Compare surfaces promising compression strategies by visualizing provenance relationships between compressed models and reveals compression-induced behavior changes by comparing models\u2019 predictions, weights, and activations. We demonstrate how Compress and Compare supports common compression analysis tasks through two case studies, debugging failed compression on generative language models and identifying compression artifacts in image classification models. We further evaluate Compress and Compare in a user study with eight compression experts, illustrating its potential to provide structure to compression workflows, help practitioners build intuition about compression, and encourage thorough analysis of compression\u2019s effect on model behavior. Through these evaluations, we identify compression-specific challenges that future visual analytics tools should consider and Compress and Compare visualizations that may generalize to broader model comparison tasks.","accessible_pdf":true,"authors":[{"affiliations":["Massachusetts Institute of Technology, Cambridge, United States"],"email":"aboggust@mit.edu","is_corresponding":true,"name":"Angie Boggust"},{"affiliations":["Carnegie Mellon University, Pittsburgh, United States"],"email":"vsivaram@andrew.cmu.edu","is_corresponding":false,"name":"Venkatesh Sivaraman"},{"affiliations":["Apple, Cambridge, United States"],"email":"yassogba@gmail.com","is_corresponding":false,"name":"Yannick Assogba"},{"affiliations":["Apple, Seattle, United States"],"email":"donghao@apple.com","is_corresponding":false,"name":"Donghao Ren"},{"affiliations":["Apple, Pittsburgh, United States"],"email":"domoritz@cmu.edu","is_corresponding":false,"name":"Dominik Moritz"},{"affiliations":["Apple, Seattle, United States"],"email":"fred.hohman@gmail.com","is_corresponding":false,"name":"Fred Hohman"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1142","image_caption":"Compress and Compare helps ML practitioners analyze and compare compression experiments. The Model Map helps practitioners understand what experiments were run and find high-performing sequences of operations, while the Model Scatterplot and Selection Details views help compare accuracy and efficiency metrics quantitatively. 
Our paper describes the challenges that Compress and Compare addresses, how we designed the system, and a study with eight experts demonstrating its potential to support compression workflows.","keywords":["Efficient machine learning, model compression, visual analytics, model comparison"],"open_access_supplemental_link":"https://github.com/apple/ml-compress-and-compare","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/pdf/2408.03274","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1142/v-full-1142_Preview.mp4?token=7bLX7TtYd-7Di9wJoeHtOTxkKehZFEDVJj3D4r93Uoo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1142/v-full-1142_Preview.srt?token=sApfAyEMxtKgQ5SzPXgHjhpIke2I3KKtKRVsbusIKtk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full21","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Model-checking and Validation","session_uid":"v-full","session_youtube_ff_id":"5tS7HFn5W6Y","session_youtube_ff_link":"https://youtu.be/5tS7HFn5W6Y","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ESst2nxcXuA&t=0h53m30s","sessions":["Model-checking and Validation"],"time_stamp":"2024-10-17T13:18:00Z","title":"Compress and Compare: Interactively Evaluating Efficiency and Behavior Across ML Model Compression Experiments","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1179","abstract":"Multi-objective evolutionary algorithms (MOEAs) have emerged as powerful tools for solving complex optimization problems characterized by multiple, often conflicting, objectives. While advancements have been made in computational efficiency as well as diversity and convergence of solutions, a critical challenge persists: the internal evolutionary mechanisms are opaque to human users. Drawing upon the successes of explainable AI in explaining complex algorithms and models, we argue that the need to understand the underlying evolutionary operators and population dynamics within MOEAs aligns well with a visual analytics paradigm. This paper introduces ParetoTracker, a visual analytics framework designed to support the comprehension and inspection of population dynamics in the evolutionary processes of MOEAs. Informed by preliminary literature review and expert interviews, the framework establishes a multi-level analysis scheme, which caters to user engagement and exploration ranging from examining overall trends in performance metrics to conducting fine-grained inspections of evolutionary operations. In contrast to conventional practices that require manual plotting of solutions for each generation, ParetoTracker facilitates the examination of temporal trends and dynamics across consecutive generations in an integrated visual interface. 
The effectiveness of the framework is demonstrated through case studies and expert interviews focused on widely adopted benchmark optimization problems.","accessible_pdf":false,"authors":[{"affiliations":["Southern University of Science and Technology, Shenzhen, China"],"email":"zhangzr32021@mail.sustech.edu.cn","is_corresponding":false,"name":"Zherui Zhang"},{"affiliations":["Southern University of Science and Technology, Shenzhen, China"],"email":"yangf2020@mail.sustech.edu.cn","is_corresponding":true,"name":"Fan Yang"},{"affiliations":["Southern University of Science and Technology, Shenzhen, China"],"email":"ranchengcn@gmail.com","is_corresponding":false,"name":"Ran Cheng"},{"affiliations":["Southern University of Science and Technology, Shenzhen, China"],"email":"mayx@sustech.edu.cn","is_corresponding":false,"name":"Yuxin Ma"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1179","image_caption":"We introduce ParetoTracker, a visual analytics framework designed to illustrate the dynamics of population generations within evolutionary processes of MOEAs, which consists of three main components: Performance Overview and Generation Statistics (A) Visual Exploration of Individuals among Generations (B) In-depth Visual Inspection of Operators (C).","keywords":["Visual analytics, multi-objective evolutionary algorithms, evolutionary computation"],"open_access_supplemental_link":"https://github.com/VIS-SUSTech/ParetoTracker","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.04539","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1179/v-full-1179_Preview.mp4?token=HiWFcD9cnvwRDeNVeu8K30udpLarCgMmf95DedtPB38&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full21","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Model-checking and Validation","session_uid":"v-full","session_youtube_ff_id":"iExTSj-IaHc","session_youtube_ff_link":"https://youtu.be/iExTSj-IaHc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ESst2nxcXuA&t=1h8m24s","sessions":["Model-checking and Validation"],"time_stamp":"2024-10-17T13:30:00Z","title":"ParetoTracker: Understanding Population Dynamics in Multi-objective Evolutionary Algorithms through Visual Analytics","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1258","abstract":"Providing effective guidance for users has long been an important and challenging task for efficient exploratory visual analytics, especially when selecting variables for visualization in high-dimensional datasets. Correlation is the most widely applied metric for guidance in statistical and analytical tools, however a reliance on correlation may lead users towards false positives when interpreting causal relations in the data. In this work, inspired by prior insights on the benefits of counterfactual visualization in supporting visual causal inference, we propose a novel, simple, and efficient counterfactual guidance method to enhance causal inference performance in guided exploratory analytics based on insights and concerns gathered from expert interviews. 
Our technique aims to capitalize on the benefits of counterfactual approaches while reducing their complexity for users. We integrated counterfactual guidance into an exploratory visual analytics system, and using a synthetically generated ground-truth causal dataset, conducted a comparative user study and evaluated to what extent counterfactual guidance can help lead users to more precise visual causal inferences. The results suggest that counterfactual guidance improved visual causal inference performance, and also led to different exploratory behaviors compared to correlation-based guidance. Based on these findings, we offer future directions and challenges for incorporating counterfactual guidance to better support exploratory visual analytics.","accessible_pdf":false,"authors":[{"affiliations":["University of North Carolina-Chapel Hill, Chapel Hill, United States"],"email":"zeyuwang@cs.unc.edu","is_corresponding":true,"name":"Arran Zeyu Wang"},{"affiliations":["UNC-Chapel Hill, Chapel Hill, United States"],"email":"borland@renci.org","is_corresponding":false,"name":"David Borland"},{"affiliations":["University of North Carolina, Chapel Hill, United States"],"email":"gotz@unc.edu","is_corresponding":false,"name":"David Gotz"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1258","image_caption":"The proposed counterfactual guidance technique is compared with traditional correlation-based guidance through five scenarios. Using the example question \"Will coffee drinking cause differences in students' grades?\", an analyst might compare data based on coffee consumption and grade distributions. The leftmost column lists the subsets created, and charts illustrate five potential distribution combinations (a-e), suggesting different answers. Symbols at the bottom indicate which methods accurately interpret the data. 
Counterfactual-based approaches have advantages in two scenarios and perform equally in the other three.","keywords":["Counterfactual, Guidance, Exploratory visual analysis, Visual causal inference, Correlation"],"open_access_supplemental_link":"https://github.com/VACLab/Co-op","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1258/v-full-1258_Preview.mp4?token=VYZkWSazuZO9S5NwEeptlwzOFMY2nnIEGDfaeGEKwyY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1258/v-full-1258_Preview.srt?token=pMJxvpJBUzRXIXcEYTAns27ai8gHIQp3yOwkNGtXc58&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full21","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Model-checking and Validation","session_uid":"v-full","session_youtube_ff_id":"xFxX4tX8KKM","session_youtube_ff_link":"https://youtu.be/xFxX4tX8KKM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ESst2nxcXuA&t=0h0m47s","sessions":["Model-checking and Validation"],"time_stamp":"2024-10-17T12:30:00Z","title":"Beyond Correlation: Incorporating Counterfactual Guidance to Better Support Exploratory Visual Analysis","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1309","abstract":"Visualizations play a critical role in validating and improving statistical models. However, the design space of model check visualizations is not well understood, making it difficult for authors to explore and specify effective graphical model checks. VMC defines a model check visualization using four components: (1) samples of distributions of checkable quantities generated from the model,including predictive distributions for new data and distributions of model parameters; (2) transformations on observed data to facilitate comparison; (3) visual representations of distributions; and (4) layouts to facilitate comparing model samples and observed data. We contribute an implementation of VMC as an R package. We validate VMC by reproducing a set of canonical model check examples, and show how using VMC to generate model checks reduces the edit distance between visualizations relative to existing visualization toolkits. 
The findings of an interview study with three expert modelers who used VMC highlight challenges and opportunities for encouraging exploration of correct, effective model check visualizations.","accessible_pdf":false,"authors":[{"affiliations":["Northwestern University, Evanston, United States"],"email":"ziyangguo1030@gmail.com","is_corresponding":true,"name":"Ziyang Guo"},{"affiliations":["University of Chicago, Chicago, United States"],"email":"kalea@uchicago.edu","is_corresponding":false,"name":"Alex Kale"},{"affiliations":["Northwestern University, Chicago, United States"],"email":"matthew.kay@gmail.com","is_corresponding":false,"name":"Matthew Kay"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"jhullman@northwestern.edu","is_corresponding":false,"name":"Jessica Hullman"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1309","image_caption":"Example model check visualizations authored with VMC, using data from [ 46 ]. From left to right: checks on the density curves of the distributions of model predictions and observed data from (A) response variable to (B) distributional parameter; follow-up checks conditional on the quantitative predictor, where VMC is used to specify (C) Hypothetical Outcome Plots and (D) a line + ribbon plot; (E) a facet check stratifying the random effects and (F) a multilevel check; more checks for the random effects specified by VMC, including (G) raincloud plots and (H) multiple-interval plots; and residual checks specified by VMC, including (I) residual plots revealing the heteroskedasticity of the model and (J) Q-Q plots, validating the normality of residuals.","keywords":["Model checking and evaluation; Uncertainty visualization; Grammar of Graphics"],"open_access_supplemental_link":"https://mucollective.github.io/vmc/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1309/v-full-1309_Preview.mp4?token=_d8vt5BoHauPleUOak6d14mJ7U4ji1klBajeDY0bUv0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1309/v-full-1309_Preview.srt?token=rWwzC9Gl8Y-pZxPODj45nY8nXCPwX6GWxHTDBCMvW5k&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full21","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Model-checking and Validation","session_uid":"v-full","session_youtube_ff_id":"OqNLDTwT7DY","session_youtube_ff_link":"https://youtu.be/OqNLDTwT7DY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ESst2nxcXuA&t=0h24m48s","sessions":["Model-checking and Validation"],"time_stamp":"2024-10-17T12:54:00Z","title":"VMC: A Grammar for Visualizing Statistical Model Checks","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1547","abstract":"Visual validation of regression models in scatterplots is a common practice for assessing model quality, yet its efficacy remains unquantified. We conducted two empirical experiments to investigate individuals\u2019 ability to visually validate linear regression models (linear trends) and to examine the impact of common visualization designs on validation quality. 
The first experiment showed that the level of accuracy for visual estimation of slope (i.e., fitting a line to data) is higher than for visual validation of slope (i.e., accepting a shown line). Notably, we found bias toward slopes that are \u201ctoo steep\u201d in both cases. This lead to novel insights that participants naturally assessed regression with orthogonal distances between the points and the line (i.e., ODR regression) rather than the common vertical distances (OLS regression). In the second experiment, we investigated whether incorporating common designs for regression visualization (error lines, bounding boxes, and confidence intervals) would improve visual validation. Even though error lines reduced validation bias, results failed to show the desired improvements in accuracy for any design. Overall, our findings suggest caution in using visual model validation for linear trends in scatterplots.","accessible_pdf":false,"authors":[{"affiliations":["University of Cologne, Cologne, Germany"],"email":"braun@cs.uni-koeln.de","is_corresponding":true,"name":"Daniel Braun"},{"affiliations":["Tufts University, Medford, United States"],"email":"remco@cs.tufts.edu","is_corresponding":false,"name":"Remco Chang"},{"affiliations":["University of Wisconsin - Madison, Madison, United States"],"email":"gleicher@cs.wisc.edu","is_corresponding":false,"name":"Michael Gleicher"},{"affiliations":["University of Cologne, Cologne, Germany"],"email":"landesberger@cs.uni-koeln.de","is_corresponding":false,"name":"Tatiana von Landesberger"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1547","image_caption":"\u201cVisual summary\u201d of visual validation and estimation accuracy for linear trends in scatterplots. 
The figure shows the true regression line (green) for OLS together with participants\u2019 average response for estimation (blue) and the range of lines with an acceptance rate of 50% or higher for validation (orange).","keywords":["Perception, visual model validation, visual model estimation, user study, information visualization"],"open_access_supplemental_link":"https://visva.cs.uni-koeln.de/en/publications/beware-of-validation-by-eye-visual-validation-of-linear-trends-in-scatterplots","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.11625","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1547/v-full-1547_Preview.mp4?token=SSXo5mnJOmbYrf5k4_9Y7_5zEcT5DmSz-KJd7OTPcyo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1547/v-full-1547_Preview.srt?token=LLrihZt2TX9EdY69thgGKMo33P18_McHP5ewTqId8Cc&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full21","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Model-checking and Validation","session_uid":"v-full","session_youtube_ff_id":"-Ohr2rTpvXI","session_youtube_ff_link":"https://youtu.be/-Ohr2rTpvXI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ESst2nxcXuA&t=0h15m14s","sessions":["Model-checking and Validation"],"time_stamp":"2024-10-17T12:42:00Z","title":"Beware of Validation by Eye: Visual Validation of Linear Trends in Scatterplots","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233302308","abstract":"We visualize the predictions of multiple machine learning models to help biologists as they interactively make decisions about cell lineage---the development of a (plant) embryo from a single ovum cell. Based on a confocal microscopy dataset, traditionally biologists manually constructed the cell lineage, starting from this observation and reasoning backward in time to establish their inheritance. To speed up this tedious process, we make use of machine learning (ML) models trained on a database of manually established cell lineages to assist the biologist in cell assignment. Most biologists, however, are not familiar with ML, nor is it clear to them which model best predicts the embryo's development. We thus have developed a visualization system that is designed to support biologists in exploring and comparing ML models, checking the model predictions, detecting possible ML model mistakes, and deciding on the most likely embryo development. To evaluate our proposed system, we deployed our interface with six biologists in an observational study. 
Our results show that the visual representations of machine learning are easily understandable, and our tool, LineageD+, could potentially increase biologists' working efficiency and enhance the understanding of embryos.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Jiayi Hong"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ross Maciejewski"},{"affiliations":"","email":"","is_corresponding":false,"name":"Alain Trubuil"},{"affiliations":"","email":"","is_corresponding":false,"name":"Tobias Isenberg"}],"award":"","doi":"10.1109/TVCG.2023.3302308","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233302308","image_caption":"In this paper, we examine the human-AI interaction within the context of plant embryo lineage analysis. To facilitate this investigation, we developed a system called LineageD+, which visualizes predictions from multiple machine learning models. This system aims to assist biologists in reconstructing the development history of plant embryos.","keywords":["Visualization, visual analytics, machine learning, comparing ML predictions, human-AI teaming, plant biology, cell lineage"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://hal.science/hal-04212205","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233302308/v-tvcg-20233302308_Preview.mp4?token=RobmTzM0OBSW6v7647_8RCVKa3CfNSnTAxYRFWKOzO0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233302308/v-tvcg-20233302308_Preview.srt?token=5D89AFZIBjrJvDk3bMTaK7lAJV1HGylEQCaJuUcqiKk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full21","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Model-checking and Validation","session_uid":"v-tvcg","session_youtube_ff_id":"reu4ziIvQYk","session_youtube_ff_link":"https://youtu.be/reu4ziIvQYk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ESst2nxcXuA&t=0h41m31s","sessions":["Model-checking and Validation"],"time_stamp":"2024-10-17T13:06:00Z","title":"Visualizing and Comparing Machine Learning Predictions to Improve Human-AI Teaming on the Example of Cell Lineage","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1026","abstract":"We present a visual analytics approach for multi-level visual exploration of users' interaction strategies in an interactive digital environment. The use of interactive touchscreen exhibits in informal learning environments, such as museums and science centers, often incorporate frameworks that classify learning processes, such as Bloom\u2019s taxonomy, to achieve better user engagement and knowledge transfer. To analyze user behavior within these digital environments, interaction logs are recorded to capture diverse exploration strategies. However, analysis of such logs is challenging, especially in terms of coupling interactions and cognitive learning processes, and existing work within learning and educational contexts remains limited. 
To address these gaps, we develop a visual analytics approach for analyzing interaction logs that supports exploration at the individual user level and multi-user comparison. The approach utilizes algorithmic methods to identify similarities in users' interactions and reveal their exploration strategies. We motivate and illustrate our approach through an application scenario, using event sequences derived from interaction log data in an experimental study conducted with science center visitors from diverse backgrounds and demographics. The study involves 14 users completing tasks of increasing complexity, designed to stimulate different levels of cognitive learning processes. We implement our approach in an interactive visual analytics prototype system, named VISID, and together with domain experts, discover a set of task-solving exploration strategies, such as \"cascading\" and \"nested-loop\", which reflect different levels of learning processes from Bloom's taxonomy. Finally, we discuss the generalizability and scalability of the presented system and the need for further research with data acquired in the wild.","accessible_pdf":true,"authors":[{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"peilin.yu@liu.se","is_corresponding":true,"name":"Peilin Yu"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"aida.vitoria@liu.se","is_corresponding":false,"name":"Aida Nordman"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"marta.koc-januchta@liu.se","is_corresponding":false,"name":"Marta M. Koc-Januchta"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden","Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"konrad.schonborn@liu.se","is_corresponding":false,"name":"Konrad J Sch\u00f6nborn"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"lonni.besancon@gmail.com","is_corresponding":false,"name":"Lonni Besan\u00e7on"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"katerina.vrotsou@liu.se","is_corresponding":false,"name":"Katerina Vrotsou"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1026","image_caption":"The main components of VISID comprise an Individual View (upper) and a Comparison View (lower). The Individual View can be alternated to visualize: (a) a participant's attribute change and interaction event sequences, or (b) their interface event sequences representing the concurrently opened infopanels and their lifetime duration. The Comparison View consists of three parts. From left to right, it visualizes interaction sequences ranked by the similarity score to a baseline participant in descending order. The similarity score bars and delta values (middle) depict the similarity/dissimilarity with respect to the baseline participant. 
The Cluster View (right) shows potential clusters of similar participants.","keywords":["Visual analytics, Visualization systems and tools, Interaction logs, Visualization techniques, Visual learning"],"open_access_supplemental_link":"https://osf.io/wnz32/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://doi.org/10.31219/osf.io/4yc8s","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1026/v-full-1026_Preview.mp4?token=MfxoMMoKp4WxUS5LilcXPAr5s9n5tgd6GhmKH7e3Zp4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1026/v-full-1026_Preview.srt?token=NwPulJzqSk6dZeJQIrKFDRhPWCOLO7ikFkIqpCl5Pxg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full22","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Time and Sequences","session_uid":"v-full","session_youtube_ff_id":"H9JJoBZBGNk","session_youtube_ff_link":"https://youtu.be/H9JJoBZBGNk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/M8JHVCnERRk&t=0h0m30s","sessions":["Time and Sequences"],"time_stamp":"2024-10-16T14:15:00Z","title":"Revealing Interaction Dynamics: Multi-Level Visual Exploration of User Strategies with an Interactive Digital Environment","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1642","abstract":"Despite the development of numerous visual analytics tools for event sequence data across various domains, including but not limited to healthcare, digital marketing, and user behavior analysis, comparing these domain-specific investigations and transferring the results to new datasets and problem areas remain challenging. Task abstractions can help us go beyond domain-specific details, but existing visualization task abstractions are insufficient for event sequence visual analytics because they primarily focus on multivariate datasets and often overlook automated analytical techniques. To address this gap, we propose a domain-agnostic multi-level task framework for event sequence analytics, derived from an analysis of 58 papers that present event sequence visualization systems. Our framework consists of four levels: objective, intent, strategy, and technique. Overall objectives identify the main goals of analysis. Intents comprises five high-level approaches adopted at each analysis step: augment data, simplify data, configure data, configure visualization, and manage provenance. Each intent is accomplished through a number of strategies, for instance, data simplification can be achieved through aggregation, summarization, or segmentation. Finally, each strategy can be implemented by a set of techniques depending on the input and output components. We further show that each technique can be expressed through a quartet of action-input-output-criteria. 
We demonstrate the framework\u2019s descriptive power through case studies and discuss its similarities and differences with previous event sequence task taxonomies.","accessible_pdf":false,"authors":[{"affiliations":["University of Maryland, College Park, College Park, United States"],"email":"kzintas@umd.edu","is_corresponding":true,"name":"Kazi Tasnim Zinat"},{"affiliations":["University of Maryland, College Park, United States"],"email":"ssakhamu@terpmail.umd.edu","is_corresponding":false,"name":"Saimadhav Naga Sakhamuri"},{"affiliations":["University of Maryland, College Park, United States"],"email":"achen151@terpmail.umd.edu","is_corresponding":false,"name":"Aaron Sun Chen"},{"affiliations":["University of Maryland, College Park, United States"],"email":"leozcliu@umd.edu","is_corresponding":false,"name":"Zhicheng Liu"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1642","image_caption":"From bigger picture to finer details Our four-tier framework consists of four levels: Objectives, Intents, Strategies, and Techniques, providing a common language to enhance cross-domain collaboration and tool evaluation.","keywords":["Task Abstraction, Event Sequence Data"],"open_access_supplemental_link":"https://osf.io/bkjsc/?view_only=b95871b8c4ae497ab9b6cb565e28edf5","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1642/v-full-1642_Preview.mp4?token=6EC8R0dh9lTx_Yo63GE8xfzo74rMtLPiKE-sX05YXik&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1642/v-full-1642_Preview.srt?token=DCfmsMRFf68vDodSJcDYHRDq3OopfC9esYpGXacq6as&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full22","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Time and Sequences","session_uid":"v-full","session_youtube_ff_id":"4WP9eGQ_hwI","session_youtube_ff_link":"https://youtu.be/4WP9eGQ_hwI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/M8JHVCnERRk&t=0h25m16s","sessions":["Time and Sequences"],"time_stamp":"2024-10-16T14:39:00Z","title":"A Multi-Level Task Framework for Event Sequence Analysis","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243358919","abstract":"We conduct two in-lab experiments (N=93) to evaluate the effectiveness of Gantt charts, extended Gantt charts, and stringline charts for visualizing fixed-order event sequence data. We first formulate five types of event sequences and define three types of sequence elements: point events, interval events, and the temporal gaps between them. Our two experiments focus on event sequences with a pre-defined, fixed order, and measure task error rates and completion time. The first experiment shows single sequences and assesses the three charts' performance in comparing event duration or gap. The second experiment shows multiple sequences and evaluates how well the charts reveal temporal patterns. 
The results suggest that when visualizing single fixed-order event sequences, 1) Gantt and extended Gantt charts lead to comparable error rates in the duration-comparing task; 2) Gantt charts exhibit either shorter or equal completion time than extended Gantt charts; 3) both Gantt and extended Gantt charts demonstrate shorter completion times than stringline charts; 4) however, stringline charts outperform the other two charts with fewer errors in the comparing task when event type counts are high. Additionally, when visualizing multiple point-based fixed-order event sequences, stringline charts require less time than Gantt charts for people to find temporal patterns. Based on these findings, we discuss design opportunities for visualizing fixed-order event sequences and discuss future avenues for optimizing these charts.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Junxiu Tang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Fumeng Yang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jiang Wu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yifang Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jiayi Zhou"},{"affiliations":"","email":"","is_corresponding":false,"name":"Xiwen Cai"},{"affiliations":"","email":"","is_corresponding":false,"name":"Lingyun Yu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yingcai Wu"}],"award":"","doi":"10.1109/TVCG.2024.3358919","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243358919","image_caption":"In two lab experiments with 93 participants, we assessed the performance of Gantt, extended Gantt, and stringline charts for visualizing fixed-order event sequences. We introduced five event sequence types with point events, interval events, and temporal gaps. Experiment 1 focused on comparing event duration or gaps in single sequences, while Experiment 2 assessed pattern detection in multiple sequences. Results indicate Gantt and extended Gantt charts had similar error rates and faster completion times than stringline charts for single sequence. However, stringline charts were more accurate with numerous event types. 
For multiple sequences, stringline charts are quicker for pattern detection.","keywords":["Gantt chart, stringline chart, Marey's graph, event sequence, empirical study"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/zpdne","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243358919/v-tvcg-20243358919_Preview.mp4?token=YP7eNz4W93N5GiZNwbr-U7L-UQEm6jMkbOb3v4o0YBQ&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243358919/v-tvcg-20243358919_Preview.srt?token=MxOnHDpxuV2ztsYfQDv6pLK3RFbuY1mUEGyHlXl0UCU&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full22","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Time and Sequences","session_uid":"v-tvcg","session_youtube_ff_id":"PTsFxQUWvIE","session_youtube_ff_link":"https://youtu.be/PTsFxQUWvIE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/M8JHVCnERRk&t=0h49m0s","sessions":["Time and Sequences"],"time_stamp":"2024-10-16T15:03:00Z","title":"A Comparative Study on Fixed-order Event Sequence Visualizations: Gantt, Extended Gantt, and Stringline Charts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243364388","abstract":"Seasonal-trend decomposition based on loess (STL) is a powerful tool to explore time series data visually. In this paper, we present an extension of STL to uncertain data, named uncertainty-aware STL (UASTL). Our method propagates multivariate Gaussian distributions mathematically exactly through the entire analysis and visualization pipeline. Thereby, stochastic quantities shared between the components of the decomposition are preserved. Moreover, we present application scenarios with uncertainty modeling based on Gaussian processes, e.g., data with uncertain areas or missing values. Besides these mathematical results and modeling aspects, we introduce visualization techniques that address the challenges of uncertainty visualization and the problem of visualizing highly correlated components of a decomposition. The global uncertainty propagation enables the time series visualization with STL-consistent samples, the exploration of correlation between and within decomposition's components, and the analysis of the impact of varying uncertainty. Finally, we show the usefulness of UASTL and the importance of uncertainty visualization with several examples. Thereby, a comparison with conventional STL is performed.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Tim Krake"},{"affiliations":"","email":"","is_corresponding":false,"name":"Daniel Kl\u00f6tzl"},{"affiliations":"","email":"","is_corresponding":false,"name":"David H\u00e4gele"},{"affiliations":"","email":"","is_corresponding":false,"name":"Daniel Weiskopf"}],"award":"","doi":"10.1109/TVCG.2024.3364388","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243364388","image_caption":"Seasonal-trend decomposition based on loess (STL) is used to visually explore time series. 
Our extension to uncertain data (UASTL) propagates uncertainty mathematically exactly through the entire analysis and visualization pipeline. Thereby, stochastic quantities shared between the components of the decomposition are preserved. Moreover, application scenarios with uncertainty modeling are presented and visualization techniques are introduced that address the challenges of uncertainty visualization and the problem of visualizing highly correlated components of a decomposition. The global uncertainty propagation enables the exploration of correlation and a sensitivity analysis to study the impact of varying uncertainty.","keywords":["- I.6.9.g Visualization techniques and methodologies < I.6.9 Visualization < I.6 Simulation, Modeling, and Visualization < I Compu - G.3 Probability and Statistics < G Mathematics of Computing - G.3.n Statistical computing < G.3 Probability and Statistics < G Mathematics of Computing - G.3.p Stochastic processes < G.3 Probability and Statistics < G Mathematics of Computing"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243364388/v-tvcg-20243364388_Preview.mp4?token=BCg7deigEVGTPFbGM0Hp_7U9pRNUNevSKYv9dhp_GWY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243364388/v-tvcg-20243364388_Preview.srt?token=fI63P6FvvRjgqvHg18msLSIGv-lLiAEzbr-KmGlhudw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full22","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Time and Sequences","session_uid":"v-tvcg","session_youtube_ff_id":"PMo1LcjeZFY","session_youtube_ff_link":"https://youtu.be/PMo1LcjeZFY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/M8JHVCnERRk&t=0h12m56s","sessions":["Time and Sequences"],"time_stamp":"2024-10-16T14:27:00Z","title":"Uncertainty-Aware Seasonal-Trend Decomposition Based on Loess","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243376406","abstract":"Visualizing event timelines for collaborative text writing is an important application for navigating and understanding such data, as time passes and the size and complexity of both text and timeline increase. They are often employed by applications such as code repositories and collaborative text editors. In this paper, we present a visualization tool to explore historical records of writing of legislative texts, which were discussed and voted on by an assembly of representatives. Our visualization focuses on event timelines from text documents that involve multiple people and different topics, allowing for observation of different proposed versions of said text or tracking data provenance of given text sections, while highlighting the connections between all elements involved. We also describe the process of designing such a tool alongside domain experts, with three steps of evaluation being conducted to verify the effectiveness of our design.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Gabriel D. 
Cantareira"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yiwen Xing"},{"affiliations":"","email":"","is_corresponding":false,"name":"Nicholas Cole"},{"affiliations":"","email":"","is_corresponding":true,"name":"Rita Borgo"},{"affiliations":"","email":"","is_corresponding":false,"name":"Alfie Abdul-Rahman"}],"award":"","doi":"10.1109/TVCG.2024.3376406","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243376406","image_caption":"This picture presents multiple views of the timeline of a historical document, showing multiple versions interacting over time (top) and a detailed breakdown of a version with selectable components (bottom).","keywords":["Data visualization, Collaboration, History, Humanities, Writing, Navigation, Metadata"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243376406/v-tvcg-20243376406_Preview.mp4?token=OjcNXImlbRw700hwSXy06KGQ6tEBUWc_a5ltQnoFgr4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243376406/v-tvcg-20243376406_Preview.srt?token=6nuoKdWt-oYrUTUkWIZAL1xCDEwD-SD_I1GSVfblXfQ&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full22","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Time and Sequences","session_uid":"v-tvcg","session_youtube_ff_id":"qp4KUQLtxbM","session_youtube_ff_link":"https://youtu.be/qp4KUQLtxbM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/M8JHVCnERRk&t=0h55m53s","sessions":["Time and Sequences"],"time_stamp":"2024-10-16T15:15:00Z","title":"Interactive Hierarchical Timeline for Collaborative Text Negotiation in Historical Records","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243382760","abstract":"Time-stamped event sequences (TSEQs) are time-oriented data without value information, shifting the focus of users to the exploration of temporal event occurrences. TSEQs exist in application domains, such as sleeping behavior, earthquake aftershocks, and stock market crashes. Domain experts face four challenges, for which they could use interactive and visual data analysis methods. First, TSEQs can be large with respect to both the number of sequences and events, often leading to millions of events. Second, domain experts need validated metrics and features to identify interesting patterns. Third, after identifying interesting patterns, domain experts contextualize the patterns to foster sensemaking. Finally, domain experts seek to reduce data complexity by data simplification and machine learning support. We present IVESA, a visual analytics approach for TSEQs. It supports the analysis of TSEQs at the granularities of sequences and events, supported with metrics and feature analysis tools. IVESA has multiple linked views that support overview, sort+filter, comparison, details-on-demand, and metadata relation-seeking tasks, as well as data simplification through feature analysis, interactive clustering, filtering, and motif detection and simplification. 
We evaluated IVESA with three case studies and a user study with six domain experts working with six different datasets and applications. Results demonstrate the usability and generalizability of IVESA across applications and cases that had up to 1,000,000 events.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"J\u00fcrgen Bernard"},{"affiliations":"","email":"","is_corresponding":false,"name":"Clara-Maria Barth"},{"affiliations":"","email":"","is_corresponding":false,"name":"Eduard Cuba"},{"affiliations":"","email":"","is_corresponding":false,"name":"Andrea Meier"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yasara Peiris"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ben Shneiderman"}],"award":"","doi":"10.1109/TVCG.2024.3382760","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243382760","image_caption":"Overview of IVESA. On the left, the Sequence Overview and Details View primarily enable the analysis of the TSEQs content, i.e., events, event sequences, groups of event sequences, motifs, and features. On the right, the Metadata View supports the analysis of metadata attributes and the TSEQs contextualization, whereas the Summary View includes the entry point to auxiliary views for filtering, motif configuration, feature analysis, and clustering.","keywords":["Time-Stamped Event Sequences, Time-Oriented Data, Visual Analytics, Data-First Design Study, Iterative Design, Visual Interfaces, User Evaluation"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243382760/v-tvcg-20243382760_Preview.mp4?token=j-q4DH0nD7WReNPJBJp9K2SNPr_8QxawbxwqV-8QMpA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243382760/v-tvcg-20243382760_Preview.srt?token=azEobpUAVOriiRVFx4PMGdKRcO5j2ztPWCWVuKbLIOE&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full22","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Time and Sequences","session_uid":"v-tvcg","session_youtube_ff_id":"7ffZxu1Nkgo","session_youtube_ff_link":"https://youtu.be/7ffZxu1Nkgo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/M8JHVCnERRk&t=0h37m42s","sessions":["Time and Sequences"],"time_stamp":"2024-10-16T14:51:00Z","title":"Visual Analysis of Time-Stamped Event Sequences","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1137","abstract":"Abstract\u2014Inspired by recent advances in digital fabrication, artists and scientists have demonstrated that physical data encodings (i.e., data physicalizations) can increase engagement with data, foster collaboration, and in some cases, improve data legibility and analysis relative to digital alternatives. However, prior empirical studies have only investigated abstract data encoded in physical form (e.g., laser cut bar charts) and not continuously sampled spatial data fields relevant to climate and medical science (e.g., heights, temperatures, densities, and velocities sampled on a spatial grid). 
This paper presents the design and results of the first study to characterize human performance in 3D spatial data analysis tasks across analogous physical and digital visualizations. Participants analyzed continuous spatial elevation data with three visualization modalities: (1) 2D digital visualization; (2) perspective-tracked, stereoscopic \"fishtank\" virtual reality; and (3) 3D printed data physicalization. Their tasks included tracing paths downhill, looking up spatial locations and comparing their relative heights, and identifying and reporting the minimum and maximum heights within certain spatial regions. As hypothesized, in most cases, participants performed the tasks just as well or better in the physical modality (based on time and error metrics). Additional results include an analysis of open-ended feedback from participants and discussion of implications for further research on the value of data physicalization. All data and supplemental materials are available at https://osf.io/7xdq4/.","accessible_pdf":true,"authors":[{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"bridger.g.herman@gmail.com","is_corresponding":true,"name":"Bridger Herman"},{"affiliations":["Beth Israel Deaconess Medical Center, Boston, United States"],"email":"cdjackso@bidmc.harvard.edu","is_corresponding":false,"name":"Cullen D. Jackson"},{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"dfk@umn.edu","is_corresponding":false,"name":"Daniel F. Keefe"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1137","image_caption":"Data physicalizations provide many potential benefits over digital data displays, including haptic perception and body-centric judgments. 
This paper compares the effectiveness of physicalizations (left) with virtual reality (right top) and 2D visualizations (right bottom) for spatial data analysis tasks on digital elevation data common in climate science and natural resource management.","keywords":["Data physicalization, virtual reality, evaluation."],"open_access_supplemental_link":"https://osf.io/7xdq4/","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/z4s9d","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1137/v-full-1137_Preview.mp4?token=MHGa74Psew6hZ1UpP2QgZ6SX6fMiKuMe3Ls2xjCriw0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1137/v-full-1137_Preview.srt?token=2ogKNpPzr0iJqMqzl-UOMt0D9wnbWfzqVxHbCvsqisU&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full23","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Accessibility and Touch","session_uid":"v-full","session_youtube_ff_id":"84IvcxzBg7U","session_youtube_ff_link":"https://youtu.be/84IvcxzBg7U","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/sO33xoUQ9fk&t=0h37m14s","sessions":["Accessibility and Touch"],"time_stamp":"2024-10-17T18:21:00Z","title":"Touching the Ground: Evaluating the Effectiveness of Data Physicalizations for Spatial Data Analysis Tasks","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1500","abstract":"Haptic feedback provides an essential sensory stimulus crucial for interaction and analyzing three-dimensional spatio-temporal phenomena on surface visualizations. Given its ability to provide enhanced spatial perception and scene maneuverability, virtual reality (VR) catalyzes haptic interactions on surface visualizations. Various interaction modes, encompassing both mid-air and on-surface interactions---with or without the application of assisting force stimuli---have been explored using haptic force feedback devices. In this paper, we evaluate the use of on-surface and assisted on-surface haptic modes of interaction compared to a no-haptic interaction mode. A force-based haptic stylus is used for all three modalities; the on-surface mode uses collision based forces, whereas the assisted on-surface mode is accompanied by an additional snapping force. We conducted a within-subjects user study involving fundamental interaction tasks performed on surface visualizations. Keeping a consistent visual design across all three modes, our study incorporates tasks that require the localization of the highest, lowest, and random points on surfaces; and tasks that focus on brushing curves on surfaces with varying complexity and occlusion levels. Our findings show that participants took almost the same time to brush curves using all the interaction modes. They could draw smoother curves using the on-surface interaction modes compared to the no-haptic mode. However, the assisted on-surface mode provided better accuracy than the on-surface mode. The on-surface mode was slower in point localization, but the accuracy depended on the visual cues and occlusions associated with the tasks. 
Finally, we discuss participant feedback on using haptic force feedback as a tangible input modality and share takeaways to aid the design of haptics-based tangible interactions for surface visualizations. ","accessible_pdf":false,"authors":[{"affiliations":["University of Calgary, Calgary, Canada"],"email":"hamza.afzaal@ucalgary.ca","is_corresponding":true,"name":"Hamza Afzaal"},{"affiliations":["University of Calgary, Calgary, Canada"],"email":"ualim@ucalgary.ca","is_corresponding":false,"name":"Usman Alim"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1500","image_caption":"The figure shows how a force-based haptic stylus (middle-top) is used to interact with 3D surface visualizations. A virtual stylus (left) is used to interact with the surface, with an assistive force (middle-bottom) that activates when the stylus enters \"snap zone\" (S) above the surface (M). The forces in snap zone are calculated using a combination of spring and snapping forces. The paths traced by participants (right) illustrate how the stylus aligns with the surface geometry, guided by these snapping forces, while the surface texture and the Laplacian of the distance transform emphasize the smoothness and accuracy of the paths.","keywords":["Scalar Field Data, Guidelines, Interaction Design, Human-Subjects Quantitative Studies, Domain Agnostic, Isosurface Techniques, Computer Graphics Techniques, AR/VR/Immersive, Specialized Input/Display Hardware"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://doi.org/10.48550/arXiv.2408.04031","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1500/v-full-1500_Preview.mp4?token=jgv8mXTzZajUyl2IUm_dK6-zrdp-umkLuenVdRWoJN0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1500/v-full-1500_Preview.srt?token=7SpPLecKhHGy023qP07DEm035Bsvx364jPfUa-tXgeI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full23","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Accessibility and Touch","session_uid":"v-full","session_youtube_ff_id":"jJowp-dAYp8","session_youtube_ff_link":"https://youtu.be/jJowp-dAYp8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/sO33xoUQ9fk&t=0h48m44s","sessions":["Accessibility and Touch"],"time_stamp":"2024-10-17T18:33:00Z","title":"Evaluating Force-based Haptics for Immersive Tangible Interactions with Surface Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1522","abstract":"Despite the recent surge of research efforts to make data visualizations accessible to people who are blind or have low vision (BLV), how to support BLV people's data analysis remains an important and challenging question. As refreshable tactile displays (RTDs) become cheaper and conversational agents continue to improve, their combination provides a promising approach to support BLV people's interactive data exploration and analysis. 
To understand how BLV people would use and react to a system combining an RTD with a conversational agent, we conducted a Wizard-of-Oz study with 11 BLV participants, where they interacted with line charts, bar charts, and isarithmic maps. Our analysis of participants' interactions led to the identification of nine distinct patterns. We also learned that the choice of modalities depended on the type of task and prior experience with tactile graphics, and that participants strongly preferred the combination of RTD and speech to a single modality. In addition, participants with more tactile experience described how tactile images facilitated a deeper engagement with the data and supported independent interpretation. Our findings will inform the design of interfaces for such interactive mixed-modality systems.","accessible_pdf":true,"authors":[{"affiliations":["Monash University, Melbourne, Australia"],"email":"samuel.reinders@monash.edu","is_corresponding":false,"name":"Samuel Reinders"},{"affiliations":["Monash University, Melbourne, Australia"],"email":"matthew.butler@monash.edu","is_corresponding":false,"name":"Matthew Butler"},{"affiliations":["Monash University, Clayton, Australia"],"email":"ingrid.zukerman@monash.edu","is_corresponding":false,"name":"Ingrid Zukerman"},{"affiliations":["Yonsei University, Seoul, Korea, Republic of","Microsoft Research, Redmond, United States"],"email":"b.lee@yonsei.ac.kr","is_corresponding":false,"name":"Bongshin Lee"},{"affiliations":["Monash University, Melbourne, Australia"],"email":"lizhen.qu@monash.edu","is_corresponding":false,"name":"Lizhen Qu"},{"affiliations":["Monash University, Melbourne, Australia"],"email":"kim.marriott@monash.edu","is_corresponding":true,"name":"Kim Marriott"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1522","image_caption":"We explored how refreshable tactile displays (RTDs) can be combined with conversational agents to assist people who are blind or have low vision (BLV) in undertaking data analysis activities. We used a Wizard-of-Oz method, allowing participants to manipulate charts rendered on the RTD, perform touch gestures, and ask the conversational agent questions to aid their understanding. Pictured is an RTD with a stacked bar chart rendered on the screen. A user is reaching out with both hands, touching raised pins on the RTD that make up the different components of the bar chart. 
","keywords":["Accessible data visualization, refreshable tactile displays, conversational agents, interactive data exploration, Wizard of Oz study, people who are blind or have low vision"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.04806","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1522/v-full-1522_Preview.mp4?token=SEEy97d0dqlKpk3cT4vIQZNToqbJOTSBwqAygXATai4&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full23","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Accessibility and Touch","session_uid":"v-full","session_youtube_ff_id":"Xw469H8JWP4","session_youtube_ff_link":"https://youtu.be/Xw469H8JWP4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/sO33xoUQ9fk&t=0h25m24s","sessions":["Accessibility and Touch"],"time_stamp":"2024-10-17T18:09:00Z","title":"When Refreshable Tactile Displays Meet Conversational Agents: Investigating Accessible Data Presentation and Analysis with Touch and Speech","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1626","abstract":"We propose and study a novel cross-reality environment that seamlessly integrates a monoscopic 2D surface (an interactive screen with touch and pen input) with a stereoscopic 3D space (an augmented reality HMD) to jointly host spatial data visualizations. This innovative approach combines the best of two conventional methods of displaying and manipulating spatial 3D data, enabling users to fluidly explore diverse visual forms using tailored interaction techniques. Providing such effective 3D data exploration techniques is pivotal for conveying its intricate spatial structures---often at multiple spatial or semantic scales---across various application domains and requiring diverse visual representations for effective visualization. To understand user reactions to our new environment, we began with an elicitation user study, in which we captured their responses and interactions. We observed that users adapted their interaction approaches based on perceived visual representations, with natural transitions in spatial awareness and actions while navigating across the physical surface. Our findings then informed the development of a design space for spatial data exploration in cross-reality. We thus developed cross-reality environments tailored to three distinct domains: for 3D molecular structure data, for 3D point cloud data, and for 3D anatomical data. In particular, we designed interaction techniques that account for the inherent features of interactions in both spaces, facilitating various forms of interaction, including mid-air gestures, touch interactions, pen interactions, and combinations thereof, to enhance the users' sense of presence and engagement. We assessed the usability of our environment with biologists, focusing on its use for domain research. In addition, we evaluated our interaction transition designs with virtual and mixed-reality experts to gather further insights. 
As a result, we provide our design suggestions for the cross-reality environment, emphasizing the interaction with diverse visual representations and seamless interaction transitions between 2D and 3D spaces.","accessible_pdf":false,"authors":[{"affiliations":["Xi'an Jiaotong-Liverpool University, Suzhou, China"],"email":"lixiang.zhao17@student.xjtlu.edu.cn","is_corresponding":true,"name":"Lixiang Zhao"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"tobias.isenberg@gmail.com","is_corresponding":false,"name":"Tobias Isenberg"},{"affiliations":["Xi'an Jiaotong-Liverpool University, Suzhou, China"],"email":"fuqi.xie20@student.xjtlu.edu.cn","is_corresponding":false,"name":"Fuqi Xie"},{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"],"email":"hainingliang@hkust-gz.edu.cn","is_corresponding":false,"name":"Hai-Ning Liang"},{"affiliations":["Xi'an Jiaotong-Liverpool University, Suzhou, China"],"email":"lingyun.yu@xjtlu.edu.cn","is_corresponding":false,"name":"Lingyun Yu"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1626","image_caption":"SpatialTouch is a novel cross-reality environment that seamlessly integrates a monoscopic 2D surface (an interactive screen with touch and pen input) with a stereoscopic 3D space (an augmented reality HMD) to jointly host spatial data visualizations. This innovative approach combines the best of two conventional methods of displaying and manipulating spatial 3D data, enabling users to fluidly explore diverse visual forms using tailored interaction techniques. Providing such effective 3D data exploration techniques is pivotal for conveying its intricate spatial structures---often at multiple spatial or semantic scales---across various application domains and requiring diverse visual representations for effective visualization.","keywords":["Spatial data, immersive visualization, cross reality, interaction techniques"],"open_access_supplemental_link":"https://osf.io/avxr9","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.14833","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1626/v-full-1626_Preview.mp4?token=sK3xgzYvz9vy3e12YjM6EeAlIsksBkwUQRHAkZSg-e0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1626/v-full-1626_Preview.srt?token=nNi8GNmk5EYUK_KG5cb0-5UOBQf0CamZesLxXXKI1Zo&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full23","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Accessibility and Touch","session_uid":"v-full","session_youtube_ff_id":"C-F1zT-UgsE","session_youtube_ff_link":"https://youtu.be/C-F1zT-UgsE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/sO33xoUQ9fk&t=0h59m39s","sessions":["Accessibility and Touch"],"time_stamp":"2024-10-17T18:45:00Z","title":"SpatialTouch: Exploring Spatial Data Visualizations in Cross-reality","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1917","abstract":"The importance of data charts is self-evident, given their ability to express complex data in a simple 
format that facilitates quick and easy comparisons, analysis, and consumption. However, the inherent visual nature of the charts creates barriers for people with visual impairments to reap the associated benefits to the same extent as their sighted peers. While extant research has predominantly focused on understanding and addressing these barriers for blind screen reader users, the needs of low-vision screen magnifier users have been largely overlooked. In an interview study, almost all low-vision participants stated that it was challenging to interact with data charts on small screen devices such as smartphones and tablets, even though they could technically \u201csee\u201d the chart content. They ascribed these challenges mainly to the magnification induced loss of visual context that connected data points with each other and also with chart annotations, e.g., axis values. In this paper, we present a method that addresses this problem by automatically transforming charts that are typically non-interactive images into personalizable interactive charts which allow selective viewing of desired data points and preserve visual context as much as possible under screen enlargement. We evaluated our method in a usability study with 26 low-vision participants, who all performed a set of representative chart-related tasks under different study conditions. In the study, we observed that our method significantly improved the usability of charts over both the status quo screen magnifier and a state-of-the-art space compaction-based solution. ","accessible_pdf":true,"authors":[{"affiliations":["Old Dominion University, Norfolk, United States"],"email":"yprak001@odu.edu","is_corresponding":true,"name":"Yash Prakash"},{"affiliations":["Old Dominion University, Norfolk, United States"],"email":"pkhan002@odu.edu","is_corresponding":false,"name":"Pathan Aseef Khan"},{"affiliations":["Old Dominion University, Norfolk, United States"],"email":"anaya001@odu.edu","is_corresponding":false,"name":"Akshay Kolgar Nayak"},{"affiliations":["Old Dominion University, Norfolk, United States"],"email":"uksjayarathna@gmail.com","is_corresponding":false,"name":"Sampath Jayarathna"},{"affiliations":["Michigan State University, East Lansing, United States"],"email":"leehaena@msu.edu","is_corresponding":false,"name":"Hae-Na Lee"},{"affiliations":["Old Dominion University, Norfolk, United States"],"email":"vganjigu@odu.edu","is_corresponding":false,"name":"Vikas Ashok"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1917","image_caption":"This figure illustrates the user journey for GraphLite, highlighting how low-vision users enhance data visualization on smartphones. The journey begins with users swiping up to access a theme picker, adjusting visual elements like contrast, colors, and font size (a). Next, they use a customization menu to filter and view specific data points, navigating options with the \"Next\" button and finalizing with \"Done,\" while also using a slide gesture to navigate selections (b). 
Finally, users personalize the visualization by adjusting bar colors, improving data interpretation and accessibility (c).","keywords":["Low vision, Graph usability, Screen magnifer, Graph perception, Accessibility"],"open_access_supplemental_link":"https://github.com/accessodu/GraphLite.git","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1917/v-full-1917_Preview.mp4?token=MBZ8ifJtfXtnMfRB7H2bLh5bT5dInwre0HDdcTyarSE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1917/v-full-1917_Preview.srt?token=KFhnAMhnRNglx-KY1SH0jQikfdNIWo4oj6SEmyyqAZg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full23","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Accessibility and Touch","session_uid":"v-full","session_youtube_ff_id":"2R4conY9Pfw","session_youtube_ff_link":"https://youtu.be/2R4conY9Pfw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/sO33xoUQ9fk&t=0h13m7s","sessions":["Accessibility and Touch"],"time_stamp":"2024-10-17T17:57:00Z","title":"Towards Enhancing Low Vision Usability of Data Charts on Smartphones","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243356566","abstract":"The increasing ubiquity of data in everyday life has elevated the importance of data literacy and accessible data representations, particularly for individuals with disabilities. While prior research predominantly focuses on the needs of the visually impaired, our survey aims to broaden this scope by investigating accessible data representations across a more inclusive spectrum of disabilities. After conducting a systematic review of 152 accessible data representation papers from ACM and IEEE databases, we found that roughly 78% of existing articles center on vision impairments. In this paper, we conduct a comprehensive review of the remaining 22% of papers focused on underrepresented disability communities. We developed categorical dimensions based on accessibility, visualization, and human-computer interaction to classify the papers. These dimensions include the community of focus, issues addressed, contribution type, study methods, participants, data type, visualization type, and data domain. Our work redefines accessible data representations by illustrating their application for disabilities beyond those related to vision. Building on our literature review, we identify and discuss opportunities for future research in accessible data representations. All supplemental materials are available at https://osf.io/ yv4xm/?view only=7b36a3fbf7a14b3888029966faa3def9.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Brianna L. Wimer"},{"affiliations":"","email":"","is_corresponding":false,"name":"Laura South"},{"affiliations":"","email":"","is_corresponding":false,"name":"Keke Wu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Danielle Albers Szafir"},{"affiliations":"","email":"","is_corresponding":false,"name":"Michelle A. Borkin"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ronald A. 
Metoyer"}],"award":"","doi":"10.1109/TVCG.2024.3356566","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243356566","image_caption":"Survey of 152 papers on accessible data visualizations showing 78% focus on visual disabilities while 22% cover other disabilities.","keywords":["Accessibility, Data Representations."],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/6prxd","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243356566/v-tvcg-20243356566_Preview.mp4?token=F3jud3aiWaCnh5W-VS696mwebVM5hw-8x6FoAVqtDh4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243356566/v-tvcg-20243356566_Preview.srt?token=s5KpUa07Knc-IjMG-14pViBZCHwARKKaCCSB_aHf3uE&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full23","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Accessibility and Touch","session_uid":"v-tvcg","session_youtube_ff_id":"Kh-u47UPXnU","session_youtube_ff_link":"https://youtu.be/Kh-u47UPXnU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/sO33xoUQ9fk&t=0h0m49s","sessions":["Accessibility and Touch"],"time_stamp":"2024-10-17T17:45:00Z","title":"Beyond Vision Impairments: Redefining the Scope of Accessible Data Representations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1302","abstract":"We present the results of an exploratory study on how pairs interact with speech commands and touch gestures on a wall-sized display during a collaborative sensemaking task. Previous work has shown that speech commands, alone or in combination with other input modalities, can support visual data exploration by individuals. However, it is still unknown whether and how speech commands can be used in collaboration, and for what tasks. To answer these questions, we developed a functioning prototype that we used as a technology probe. We conducted an in-depth exploratory study with 10 participant pairs to analyze their interaction choices, the interplay between the input modalities, and their collaboration. While touch was the most used modality, we found that participants preferred speech commands for global operations, used them for distant interaction, and that speech interaction contributed to the awareness of the partner\u2019s actions. Furthermore, the likelihood of using speech commands during collaboration was related to the personality trait of agreeableness. Regarding collaboration styles, participants interacted with speech equally often whether they were in loosely or closely coupled collaboration. While the partners stood closer to each other during close collaboration, they did not distance themselves to use speech commands. From our findings, we derive and contribute a set of design considerations for collaborative and multimodal interactive data analysis systems. 
All supplemental materials are available at https://osf.io/8gpv2.","accessible_pdf":true,"authors":[{"affiliations":["University of Bremen, Bremen, Germany","University of Bremen, Bremen, Germany"],"email":"molina@uni-bremen.de","is_corresponding":true,"name":"Gabriela Molina Le\u00f3n"},{"affiliations":["LISN, Universit\u00e9 Paris-Saclay, CNRS, INRIA, Orsay, France"],"email":"anastasia.bezerianos@universite-paris-saclay.fr","is_corresponding":false,"name":"Anastasia Bezerianos"},{"affiliations":["Inria, Palaiseau, France"],"email":"olivier.gladin@inria.fr","is_corresponding":false,"name":"Olivier Gladin"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"petra.isenberg@inria.fr","is_corresponding":false,"name":"Petra Isenberg"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1302","image_caption":"Two people standing in front of the wall display; one person is moving a group of selected documents by dragging a stack of them with the index finger while the other one observes.","keywords":["Speech interaction, wall display, collaborative sensemaking, multimodal interaction, collaboration styles"],"open_access_supplemental_link":"https://osf.io/8gpv2/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.03813","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1302/v-full-1302_Preview.mp4?token=X7Rtn5_FSND2OZ7n5tcNS3dGX4peJ_l1b467_T1gcoo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1302/v-full-1302_Preview.srt?token=g_tsHKQ3ALZ2lV4XhlpYWC82NHAFsoAG6MIV-xB4iu0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full24","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Collaboration and Communication","session_uid":"v-full","session_youtube_ff_id":"-xq224J5umc","session_youtube_ff_link":"https://youtu.be/-xq224J5umc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/w1Tud4nOruI&t=1h3m4s","sessions":["Collaboration and Communication"],"time_stamp":"2024-10-16T17:00:00Z","title":"Talk to the Wall: The Role of Speech Interaction in Collaborative Visual Analytics","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1329","abstract":"The integration of Large Language Models (LLMs), especially ChatGPT, into education is poised to revolutionize students\u2019 learning experiences by introducing innovative conversational learning methodologies. To empower students to fully leverage the capabilities of ChatGPT in educational scenarios, understanding students\u2019 interaction patterns with ChatGPT is crucial for instructors. However, this endeavor is challenging due to the absence of datasets focused on student-ChatGPT conversations and the complexities in identifying and analyzing the evolutional interaction patterns within conversations. To address these challenges, we collected conversational data from 48 students interacting with ChatGPT in a master\u2019s level data visualization course over one semester. 
We then developed a coding scheme, grounded in the literature on cognitive levels and thematic analysis, to categorize students\u2019 interaction patterns with ChatGPT. Furthermore, we present a visual analytics system, StuGPTViz, that tracks and compares temporal patterns in student prompts and the quality of ChatGPT\u2019s responses at multiple scales, revealing significant pedagogical insights for instructors. We validated the system\u2019s effectiveness through expert interviews with six data visualization instructors and three case studies. The results confirmed StuGPTViz\u2019s capacity to enhance educators\u2019 insights into the pedagogical value of ChatGPT. We also discussed the potential research opportunities of applying visual analytics in education and developing AI-driven personalized learning solutions.","accessible_pdf":true,"authors":[{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"zchendf@connect.ust.hk","is_corresponding":true,"name":"Zixin Chen"},{"affiliations":["The Hong Kong University of Science and Technology, Sai Kung, China"],"email":"wangjiachen@zju.edu.cn","is_corresponding":false,"name":"Jiachen Wang"},{"affiliations":["Texas A","M University, College Station, United States"],"email":"xiameng9355@gmail.com","is_corresponding":false,"name":"Meng Xia"},{"affiliations":["The Hong Kong University of Science and Technology, Kowloon, Hong Kong"],"email":"kshigyo@connect.ust.hk","is_corresponding":false,"name":"Kento Shigyo"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"dliuak@connect.ust.hk","is_corresponding":false,"name":"Dingdong Liu"},{"affiliations":["Hong Kong University of Science and Technology, Hong Kong, Hong Kong"],"email":"rzhangab@connect.ust.hk","is_corresponding":false,"name":"Rong Zhang"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"huamin@cse.ust.hk","is_corresponding":false,"name":"Huamin Qu"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1329","image_caption":"We developed StuGPTViz, a visual analytics system designed to analyze and compare student interactions with ChatGPT in a master's-level data visualization course. By categorizing prompts and responses using a coding scheme grounded in literature on cognitive levels and thematic analysis, the system reveals key patterns and insights. 
Validated through expert interviews and case studies, StuGPTViz enhances educators' understanding of ChatGPT's pedagogical value, demonstrating the potential of visual analytics to drive AI-driven personalized learning and improve educational outcomes.","keywords":["Visual analytics for education, ChatGPT for education, student-ChatGPT interaction"],"open_access_supplemental_link":"https://github.com/CinderD/StuGPTViz_Supplemental","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.12423","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1329/v-full-1329_Preview.mp4?token=3rbgGFvDtMShBC9lwJqoPby4KLpDPaQSfRuudFGzbmo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1329/v-full-1329_Preview.srt?token=Vu9ILrtMjr886gOJFjLuEkfBzp_gBW8c7A3hd4MdWxQ&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full24","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Collaboration and Communication","session_uid":"v-full","session_youtube_ff_id":"r4bxhQuXqIM","session_youtube_ff_link":"https://youtu.be/r4bxhQuXqIM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/w1Tud4nOruI&t=0h0m48s","sessions":["Collaboration and Communication"],"time_stamp":"2024-10-16T16:00:00Z","title":"StuGPTViz: A Visual Analytics Approach to Understand Student-ChatGPT Interactions","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1368","abstract":"Synthetic Lethal (SL) relationships, though rare among the vast array of gene combinations, hold substantial promise for targeted cancer therapy. Despite advancements in AI model accuracy, there is still a significant need among domain experts for interpretive paths and mechanism explorations that align better with domain-specific knowledge, particularly due to the high costs of experimentation. To address this gap, we propose an iterative Human-AI collaborative framework with two key components: 1) Human-Engaged Knowledge Graph Refinement based on Metapath Strategies, which leverages insights from interpretive paths and domain expertise to refine the knowledge graph through metapath strategies with appropriate granularity. 2) Cross-Granularity SL Interpretation Enhancement and Mechanism Analysis, which aids experts in organizing and comparing predictions and interpretive paths across different granularities, uncovering new SL relationships, enhancing result interpretation, and elucidating potential mechanisms inferred by Graph Neural Network (GNN) models. These components cyclically optimize model predictions and mechanism explorations, enhancing expert involvement and intervention to build trust. Facilitated by SLInterpreter, this framework ensures that newly generated interpretive paths increasingly align with domain knowledge and adhere more closely to real-world biological principles through iterative Human-AI collaboration. 
We evaluate the framework\u2019s efficacy through a case study and expert interviews.","accessible_pdf":false,"authors":[{"affiliations":["Shanghaitech University, Shanghai, China"],"email":"jianghr2023@shanghaitech.edu.cn","is_corresponding":true,"name":"Haoran Jiang"},{"affiliations":["ShanghaiTech University, Shanghai, China"],"email":"shishh2023@shanghaitech.edu.cn","is_corresponding":false,"name":"Shaohan Shi"},{"affiliations":["ShanghaiTech University, Shanghai, China"],"email":"zhangshh2@shanghaitech.edu.cn","is_corresponding":false,"name":"Shuhao Zhang"},{"affiliations":["ShanghaiTech University, Shanghai, China"],"email":"zhengjie@shanghaitech.edu.cn","is_corresponding":false,"name":"Jie Zheng"},{"affiliations":["ShanghaiTech University, Shanghai, China"],"email":"liquan@shanghaitech.edu.cn","is_corresponding":false,"name":"Quan Li"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1368","image_caption":"SLInterpreter, based on an iterative Human-AI collaboration framework, aims at 1) Human-Engaged Knowledge Graph Refinement based on Metapath Strategies and 2) Cross-Granularity SL Interpretation Enhancement and Mechanism Analysis for domain experts. Domain experts explore new SL pairs using interpretive paths generated by a model trained on the entire data. Irrelevant or incorrect paths that may introduce noise are eliminated from the KG using appropriate metapath strategies. Subsequently, the model retrains, allowing domain experts to iteratively scrutinize predictions and interpretive paths, refining the KG. This iterative process optimizes predictions and mechanism exploration, enhancing expert participation and intervention, leading to increased trust. ","keywords":["Synthetic Lethality, Model Interpretability, Visual Analytics, Iterative Human-AI Collaboration."],"open_access_supplemental_link":"https://github.com/jianghr-shanghaitech/SLInterpreter-Demo","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.14770","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1368/v-full-1368_Preview.mp4?token=2pjLZ8kfmwZTY5NRT1gcmw7TYjbBieYXXm59fK4BgU0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1368/v-full-1368_Preview.srt?token=m4Eev96u53ePZ8jNtMBDWEOvYZGzxUCUmnR8PHCT350&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full24","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Collaboration and Communication","session_uid":"v-full","session_youtube_ff_id":"eaCCRPrMxk8","session_youtube_ff_link":"https://youtu.be/eaCCRPrMxk8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/w1Tud4nOruI&t=0h14m15s","sessions":["Collaboration and Communication"],"time_stamp":"2024-10-16T16:12:00Z","title":"SLInterpreter: An Exploratory and Iterative Human-AI Collaborative System for GNN-based Synthetic Lethal Prediction","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1487","abstract":"Referential gestures, or as termed in linguistics, deixis, are an essential part of communication around data visualizations. 
Despite their importance, such gestures are often overlooked when documenting data analysis meetings. Transcripts, for instance, fail to capture gestures, and video recordings may not adequately capture or emphasize them. We introduce a novel method for documenting collaborative data meetings that treats deixis as a first-class citizen. Our proposed framework captures cursor-based gestural data along with audio and converts them into interactive documents. The framework leverages a large language model to identify word correspondences with gestures. These identified references are used to create context-based annotations in the resulting interactive document. We assess the effectiveness of our proposed method through a user study, finding that participants preferred our automated interactive documentation over recordings, transcripts, and manual note-taking. Furthermore, we derive a preliminary taxonomy of cursor-based deictic gestures from participant actions during the study. This taxonomy offers further opportunities for better utilizing cursor-based deixis in collaborative data analysis scenarios.","accessible_pdf":true,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"hatch.on27@gmail.com","is_corresponding":true,"name":"Chang Han"},{"affiliations":["The University of Utah, Salt Lake City, United States"],"email":"kisaacs@sci.utah.edu","is_corresponding":false,"name":"Katherine E. Isaacs"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1487","image_caption":"An overview of the interactive notes, with: (A) Interactive text, comprising transcripts from audio and the LLM-generated meeting minutes, includes interactive text components based on the results of utterance matching and reference extraction. (B) Visual media from the meetings are presented with annotations based on parameters transmitted by the interactive text on the left. This operation can change the underlying visualization, add annotations, and alter interactive states. 
","keywords":["Taxonomy, Models, Frameworks, Theory ; Collaboration ; Communication/Presentation, Storytelling"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.04041","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1487/v-full-1487_Preview.mp4?token=FQT1xLvgPUr_7zSaXHM2CKNTj1_d2c5nF6amiRsmBtY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1487/v-full-1487_Preview.srt?token=GmbrGAoHSO2hRXYZfucWq6Ym-yzgi6syszwqk7mHthw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full24","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Collaboration and Communication","session_uid":"v-full","session_youtube_ff_id":"c8cC1W9ucj8","session_youtube_ff_link":"https://youtu.be/c8cC1W9ucj8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/w1Tud4nOruI&t=0h39m5s","sessions":["Collaboration and Communication"],"time_stamp":"2024-10-16T16:36:00Z","title":"A Deixis-Centered Approach for Documenting Remote Synchronous Communication around Data Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20223229017","abstract":"We present V-Mail, a framework of cross-platform applications, interactive techniques, and communication protocols for improved multi-person correspondence about spatial 3D datasets. Inspired by the daily use of e-mail, V-Mail seeks to enable a similar style of rapid, multi-person communication accessible on any device; however, it aims to do this in the new context of spatial 3D communication, where limited access to 3D graphics hardware typically prevents such communication. The approach integrates visual data storytelling with data exploration, spatial annotations, and animated transitions. V-Mail ``data stories'' are exported in a standard video file format to establish a common baseline level of access on (almost) any device. The V-Mail framework also includes a series of complementary client applications and plugins that enable different degrees of story co-authoring and data exploration, adjusted automatically to match the capabilities of various devices. A lightweight, phone-based V-Mail app makes it possible to annotate data by adding captions to the video. These spatial annotations are then immediately accessible to team members running high-end 3D graphics visualization systems that also include a V-Mail client, implemented as a plugin. Results and evaluation from applying V-Mail to assist communication within an interdisciplinary science team studying Antarctic ice sheets confirm the utility of the asynchronous, cross-platform collaborative framework while also highlighting some current limitations and opportunities for future work.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Jung Who Nam"},{"affiliations":"","email":"","is_corresponding":false,"name":"Tobias Isenberg"},{"affiliations":"","email":"","is_corresponding":true,"name":"Daniel F. 
Keefe"}],"award":"","doi":"10.1109/TVCG.2022.3229017","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20223229017","image_caption":"V-Mail is a framework of cross-platform applications, interactive techniques, and communication protocols for multi-person correspondence about spatial 3D datasets. It has three working platforms that demonstrate different storytelling fidelities of V-Mail: (bottom-left) anyone with a video player can at least passively view the story, including annotations made by others; (top-right) in the highest-fidelity case, the story unlocks data on a V-Mail server than can be loaded via a plugin for desktop-based visualization applications, where users can explore and annotate the 3D data more deeply; (bottom-right) the mobile client works as a custom video player with mechanisms for adding annotations. ","keywords":["Human-computer interaction, visualization of scientific 3D data, communication, storytelling, immersive analytics"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://hal.science/hal-03924707","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20223229017/v-tvcg-20223229017_Preview.mp4?token=IxbiG0ErK3J9Z37IFK6q9x6nleACetMR6uzAo29MSk8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20223229017/v-tvcg-20223229017_Preview.srt?token=q9ZZ0nKol3LUeioElIKQC0rnZo08cDV3NmSNp9rilik&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full24","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Collaboration and Communication","session_uid":"v-tvcg","session_youtube_ff_id":"5sr7x3v9C1Y","session_youtube_ff_link":"https://youtu.be/5sr7x3v9C1Y","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/w1Tud4nOruI&t=0h27m6s","sessions":["Collaboration and Communication"],"time_stamp":"2024-10-16T16:24:00Z","title":"V-Mail: 3D-Enabled Correspondence about Spatial Data on (Almost) All Your Devices","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233323150","abstract":"We examined user preferences to combine multiple interaction modalities for collaborative interaction with data shown on large vertical displays. Large vertical displays facilitate visual data exploration and allow the use of diverse interaction modalities by multiple users at different distances from the screen. Yet, how to offer multiple interaction modalities is a non-trivial problem. We conducted an elicitation study with 20 participants that generated 1015 interaction proposals combining touch, speech, pen, and mid-air gestures. Given the opportunity to interact using these four 13 modalities, participants preferred speech interaction in 10 of 15 14 low-level tasks and direct manipulation for straightforward tasks 15 such as showing a tooltip or selecting. In contrast to previous work, 16 participants most favored unimodal and personal interactions. We 17 identified what we call collaborative synonyms among their interaction proposals and found that pairs of users collaborated either unimodally and simultaneously or multimodally and sequentially. 
We provide insights into how end-users associate visual exploration tasks with certain modalities and how they collaborate at different interaction distances using specific interaction modalities. The supplemental material is available at https://osf.io/m8zuh.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Gabriela Molina Le\u00f3n"},{"affiliations":"","email":"","is_corresponding":false,"name":"Petra Isenberg"},{"affiliations":"","email":"","is_corresponding":false,"name":"Andreas Breiter"}],"award":"","doi":"10.1109/TVCG.2023.3323150","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233323150","image_caption":"One person taps on the large vertical display to position an annotation on a bar chart, while the second one waits to perform a speech command to complete the annotation.","keywords":["Multimodal interaction, collaborative work, large vertical displays, elicitation study, spatio-temporal data"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://inria.hal.science/hal-04365019","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233323150/v-tvcg-20233323150_Preview.mp4?token=oeG0IHkINWFB_kHZN1cFo4gGQIikZc5JzGhSffvQrqo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233323150/v-tvcg-20233323150_Preview.srt?token=-mODdCEMCAaG_-Yn7OVw0Pv66hYopF49mxbOk6GZYyE&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full24","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Collaboration and Communication","session_uid":"v-tvcg","session_youtube_ff_id":"3_88Dw8U6wo","session_youtube_ff_link":"https://youtu.be/3_88Dw8U6wo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/w1Tud4nOruI&t=0h50m10s","sessions":["Collaboration and Communication"],"time_stamp":"2024-10-16T16:48:00Z","title":"Eliciting Multimodal and Collaborative Interactions for Data Exploration on Large Vertical Displays","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1063","abstract":"While previous work has found success in deploying visualizations as museum exhibits, it has not investigated whether museum context impacts visitor behaviour with these exhibits. We present an interactive Deep-time Literacy Visualization Exhibit (DeLVE) to help museum visitors understand deep time (lengths of extremely long geological processes) by improving proportional reasoning skills through comparison of different time periods. DeLVE uses a new visualization idiom, Connected Multi-Tier Ranges, to visualize curated datasets of past events across multiple scales of time, relating extreme scales with concrete scales that have more familiar magnitudes and units. Museum staff at three separate museums approved the deployment of DeLVE as a digital kiosk, and devoted time to curating a unique dataset in each of them. We collect data from two sources, an observational study and system trace logs. We discuss the importance of context: similar museum exhibits in different contexts were received very differently by visitors. 
We additionally discuss differences in our process from Sedlmair et al.'s design study methodology which is focused on design studies triggered by connection with collaborators rather than the discovery of a concept to communicate. Supplemental materials are available at: https://osf.io/z53dq/","accessible_pdf":true,"authors":[{"affiliations":["The University of British Columbia, Vancouver, Canada"],"email":"marasolen@gmail.com","is_corresponding":true,"name":"Mara Solen"},{"affiliations":["University of British Columbia , Vancouver, Canada"],"email":"sultananigar70@gmail.com","is_corresponding":false,"name":"Nigar Sultana"},{"affiliations":["University of British Columbia, Vancouver, Canada"],"email":"laura.lukes@ubc.ca","is_corresponding":false,"name":"Laura A. Lukes"},{"affiliations":["University of British Columbia, Vancouver, Canada"],"email":"tmm@cs.ubc.ca","is_corresponding":false,"name":"Tamara Munzner"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1063","image_caption":"The DeLVE visualization software, displaying the dataset of past events in biological and geological history, as deployed at a biology museum. The data is visualized across multiple scales using our novel Connected Multi-Tier Ranges idiom.","keywords":["Visualization, design study, museum, deep time."],"open_access_supplemental_link":"https://osf.io/z53dq/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2404.01488","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1063/v-full-1063_Preview.mp4?token=imowDK921MoWpId_cyY984Z_zSeJnC13Qem8GS3ciF0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1063/v-full-1063_Preview.srt?token=qJtfNNpAEr4S1t3_WdJqLeMQXxxliYqfkZaDzb_89Gc&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full25","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Once Upon a Visualization","session_uid":"v-full","session_youtube_ff_id":"LoSRYmcllmY","session_youtube_ff_link":"https://youtu.be/LoSRYmcllmY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/I_s2xqsUD28&t=0h0m44s","sessions":["Once Upon a Visualization"],"time_stamp":"2024-10-17T16:00:00Z","title":"DeLVE into Earth\u2019s Past: A Visualization-Based Exhibit Deployed Across Multiple Museum Contexts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1333","abstract":"Data videos increasingly becoming a popular data storytelling form represented by visual and audio integration. In recent years, more and more researchers have explored many narrative structures for effective and attractive data storytelling. Meanwhile, the Hero's Journey provides a classic narrative framework specific to the Hero's story that has been adopted by various mediums. There are continuous discussions about applying Hero's Journey to data stories. However, so far, little systematic and practical guidance on how to create a data video for a specific story type like the Hero's Journey, as well as how to manipulate its sound and visual designs simultaneously. 
To fulfill this gap, we first identified 48 data videos aligned with the Hero's Journey as the common storytelling from 109 high-quality data videos. Then, we examined how existing practices apply Hero's Journey for creating data videos. We coded the 48 data videos in terms of the narrative stages, sound design, and visual design according to the Hero's Journey structure. Based on our findings, we proposed a design space to provide practical guidance on the narrative, visual, and sound custom design for different narrative segments of the hero's journey (i.e., Departure, Initiation, Return) through data video creation. To validate our proposed design space, we conducted a user study where 20 participants were invited to design data videos with and without our design space guidance, which was evaluated by two experts. Results show that our design space provides useful and practical guidance for data storytellers effectively creating data videos with the Hero's Journey.","accessible_pdf":false,"authors":[{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, Hong Kong"],"email":"zwei302@connect.hkust-gz.edu.cn","is_corresponding":true,"name":"Zheng Wei"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"huamin@cse.ust.hk","is_corresponding":false,"name":"Huamin Qu"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"xxubq@connect.ust.hk","is_corresponding":false,"name":"Xian Xu"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1333","image_caption":"Applying the Hero's Journey as a framework for creating data videos, we organize a design space into three segments (i.e., Departure, Initiation, Return), grounded in the narrative structure of the Hero's Journey. The Departure has six narrative stages, the Initiation has seven narrative stages, and the Return has four narrative stages. Each narrative stage is equipped with corresponding sound design and visual design.","keywords":["The Hero's Journey, Narrative Structure, Narrative Visualization, Data Visualization, Data Videos"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1333/v-full-1333_Preview.mp4?token=fETBO1EkdBunsDoPRjp9sEM_y_dQMiu_zH8vQNcA_3E&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full25","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Once Upon a Visualization","session_uid":"v-full","session_youtube_ff_id":"IXwVnOl8OAo","session_youtube_ff_link":"https://youtu.be/IXwVnOl8OAo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/I_s2xqsUD28&t=0h12m47s","sessions":["Once Upon a Visualization"],"time_stamp":"2024-10-17T16:12:00Z","title":"Telling Data Stories with the Hero\u2019s Journey: Design Guidance for Creating Data Videos","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1425","abstract":"Comics are an effective method for sequential data-driven storytelling, especially for dynamic graphs\u2014graphs whose vertices and edges change over time. 
However, manually creating such comics is currently time-consuming, complex, and error-prone. In this paper, we propose DG Comics, a novel comic authoring tool for dynamic graphs that allows users to semi-automatically build and annotate comics. The tool uses a newly developed hierarchical clustering algorithm to segment consecutive snapshots of dynamic graphs while preserving their chronological order. It also presents rich information on both individuals and communities extracted from dynamic graphs in multiple views, where users can explore dynamic graphs and choose what to tell in comics. For evaluation, we provide an example and report the results of a user study and an expert review. ","accessible_pdf":false,"authors":[{"affiliations":["Ulsan National Institute of Science and Technology, Ulsan, Korea, Republic of"],"email":"joohee@unist.ac.kr","is_corresponding":true,"name":"Joohee Kim"},{"affiliations":["Ulsan National Institute of Science and Technology, Ulsan, Korea, Republic of"],"email":"gusdnr0916@unist.ac.kr","is_corresponding":false,"name":"Hyunwook Lee"},{"affiliations":["Ulsan National Institute of Science and Technology, Ulsan, Korea, Republic of"],"email":"ducnm@unist.ac.kr","is_corresponding":false,"name":"Duc M. Nguyen"},{"affiliations":["Australian National University, Canberra, Australia"],"email":"minjeong.shin@anu.edu.au","is_corresponding":false,"name":"Minjeong Shin"},{"affiliations":["IBM Research, Cambridge, United States"],"email":"bumchul.kwon@us.ibm.com","is_corresponding":false,"name":"Bum Chul Kwon"},{"affiliations":["UNIST, Ulsan, Korea, Republic of"],"email":"sako@unist.ac.kr","is_corresponding":false,"name":"Sungahn Ko"},{"affiliations":["Aarhus University, Aarhus, Denmark"],"email":"elm@cs.au.dk","is_corresponding":false,"name":"Niklas Elmqvist"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1425","image_caption":"DG Comics offers a Summary View that facilitates the automatic generation of comic templates, sliders for filtering and highlighting nodes, a Graph Comic View for editing the graph comic, and Main Character and Supporting Character tables for managing nodes. It also includes a Timeline View for exploring graph snapshots. Users can switch to the Node Attribute Table to select specific main characters or to the Community View to inspect the evolution of node relationships. 
The tool supports (M) mental map preservation by fixing nodes across displays and visualizes (O) community changes using bubble sets.","keywords":["Data-driven storytelling, narrative visualization, dynamic graphs, graph comics"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.04874","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1425/v-full-1425_Preview.mp4?token=k2gdCk90u5wFDpzx5zb2D4M_tSdgJgLIUBwsRlEFYB0&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full25","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Once Upon a Visualization","session_uid":"v-full","session_youtube_ff_id":"qzU1QLDM4zs","session_youtube_ff_link":"https://youtu.be/qzU1QLDM4zs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/I_s2xqsUD28&t=0h59m56s","sessions":["Once Upon a Visualization"],"time_stamp":"2024-10-17T17:00:00Z","title":"DG Comics: Semi-Automatically Authoring Graph Comics for Dynamic Graphs","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243372104","abstract":"With the rise of short-form video platforms and the increasing availability of data, we see the potential for people to share short-form videos embedded with data in situ (e.g., daily steps when running) to increase the credibility and expressiveness of their stories. However, creating and sharing such videos in situ is challenging since it involves multiple steps and skills (e.g., data visualization creation and video editing), especially for amateurs. By conducting a formative study (N=10) using three design probes, we collected the motivations and design requirements. We then built VisTellAR, a mobile AR authoring tool, to help amateur video creators embed data visualizations in short-form videos in situ. A two-day user study shows that participants (N=12) successfully created various videos with data visualizations in situ and they confirmed the ease of use and learning. AR pre-stage authoring was useful to assist people in setting up data visualizations in reality with more designs in camera movements and interaction with gestures and physical objects to storytelling.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Wai Tong"},{"affiliations":"","email":"","is_corresponding":false,"name":"Kento Shigyo"},{"affiliations":"","email":"","is_corresponding":false,"name":"Lin-Ping Yuan"},{"affiliations":"","email":"","is_corresponding":false,"name":"Mingming Fan"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ting-Chuen Pong"},{"affiliations":"","email":"","is_corresponding":false,"name":"Huamin Qu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Meng Xia"}],"award":"","doi":"10.1109/TVCG.2024.3372104","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243372104","image_caption":"This figure illustrates an authoring process. (a-b) VisTellAR detects planes and objects for users to anchor visualizations in reality. Users can edit the data, mark, axis, and behavior. 
(c-d) During video-taking, users can voice over, perform hand gestures, and see a countdown that notifies them when the visualization will be shown. (e-f) After taking the video, a timeline is shown to indicate when visualizations take place in the video. Users can reconfigure visualizations if needed. ","keywords":["Personal data, augmented reality, data visualization, storytelling, short-form video"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://www.researchgate.net/publication/378657335_VisTellAR_Embedding_Data_Visualization_to_Short-form_Videos_Using_Mobile_Augmented_Reality","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243372104/v-tvcg-20243372104_Preview.mp4?token=2PJhSCM9a-82r8KWVB_bvpn5XWzShv6y8d50FAY5m4o&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full25","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Once Upon a Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"EeX1q0ZhSII","session_youtube_ff_link":"https://youtu.be/EeX1q0ZhSII","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/I_s2xqsUD28&t=0h23m29s","sessions":["Once Upon a Visualization"],"time_stamp":"2024-10-17T16:24:00Z","title":"VisTellAR: Embedding Data Visualization to Short-form Videos Using Mobile Augmented Reality","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243397004","abstract":"Data charts are prevalent across various fields due to their efficacy in conveying complex data relationships. However, static charts may sometimes struggle to engage readers and efficiently present intricate information, potentially resulting in limited understanding. We introduce \u201cLive Charts,\u201d a new format of presentation that decomposes complex information within a chart and explains the information pieces sequentially through rich animations and accompanying audio narration. We propose an automated approach to revive static charts into Live Charts. Our method integrates GNN-based techniques to analyze the chart components and extract data from charts. Then we adopt large natural language models to generate appropriate animated visuals along with a voice-over to produce Live Charts from static ones. We conducted a thorough evaluation of our approach, which involved the model performance, use cases, a crowd-sourced user study, and expert interviews. The results demonstrate Live Charts offer a multi-sensory experience where readers can follow the information and understand the data insights better. 
We analyze the benefits and drawbacks of Live Charts over static charts as a new information consumption experience.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Lu Ying"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yun Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Haotian Li"},{"affiliations":"","email":"","is_corresponding":false,"name":"Shuguang Dou"},{"affiliations":"","email":"","is_corresponding":false,"name":"Haidong Zhang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Xinyang Jiang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Huamin Qu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yingcai Wu"}],"award":"","doi":"10.1109/TVCG.2024.3397004","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243397004","image_caption":"Two Live Charts are presented: (a1-a5) and (b1-b5). The image flow illustrates the keyframes of the LiveChart, with animations highlighted by dotted blue boxes. The following text provides the corresponding audio narration, with the first tag identifying the chart component or type of insight being described.","keywords":["Charts, storytelling, machine learning, automatic visualization"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243397004/v-tvcg-20243397004_Preview.mp4?token=br95VJa9I28ipJKCRSa-9txfELwn1J1Gapsz5LvOlKo&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full25","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Once Upon a Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"_-K7ygteIfM","session_youtube_ff_link":"https://youtu.be/_-K7ygteIfM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/I_s2xqsUD28&t=0h47m57s","sessions":["Once Upon a Visualization"],"time_stamp":"2024-10-17T16:48:00Z","title":"Reviving Static Charts into Live Charts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243411575","abstract":"Creating an animated data video with audio narration is a time-consuming and complex task that requires expertise. It involves designing complex animations, turning written scripts into audio narrations, and synchronizing visual changes with the narrations. This paper presents WonderFlow, an interactive authoring tool, that facilitates narration-centric design of animated data videos. WonderFlow allows authors to easily specify semantic links between text and the corresponding chart elements. Then it automatically generates audio narration by leveraging text-to-speech techniques and aligns the narration with an animation. WonderFlow provides a structure-aware animation library designed to ease chart animation creation, enabling authors to apply pre-designed animation effects to common visualization components. Additionally, authors can preview and refine their data videos within the same system, without having to switch between different creation tools. 
A series of evaluation results confirmed that WonderFlow is easy to use and simplifies the creation of data videos with narration-animation interplay.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Yun Wang"},{"affiliations":"","email":"","is_corresponding":true,"name":"Leixian Shen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Zhengxin You"},{"affiliations":"","email":"","is_corresponding":false,"name":"Xinhuan Shu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Bongshin Lee"},{"affiliations":"","email":"","is_corresponding":false,"name":"John Thompson"},{"affiliations":"","email":"","is_corresponding":false,"name":"Haidong Zhang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Dongmei Zhang"}],"award":"","doi":"10.1109/TVCG.2024.3411575","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243411575","image_caption":"User interface of WonderFlow. Users can first select the text phrases in the narration editor (a) and visual elements from the canvas (b) to form text-visual links. Then they can apply an animation preset selected in the animation effect panel (c) to the visual elements. WonderFlow then generates a narration-animation pack on the timeline (d).","keywords":["Data video, Data visualization, Narration-animation interplay, Storytelling, Authoring tool"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243411575/v-tvcg-20243411575_Preview.mp4?token=jHIqyZSMu4awRI-YQ30KvjX8iNUjp8d61CSq8x4Xn-w&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full25","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Once Upon a Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"qMT01pGEVfg","session_youtube_ff_link":"https://youtu.be/qMT01pGEVfg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/I_s2xqsUD28&t=0h35m40s","sessions":["Once Upon a Visualization"],"time_stamp":"2024-10-17T16:36:00Z","title":"WonderFlow: Narration-Centric Design of Animated Data Videos","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1288","abstract":"Data tables are one of the most common ways in which people encounter data. Although mostly built with text and numbers, data tables have a spatial layout and often exhibit visual elements meant to facilitate their reading. Surprisingly, there is an empirical knowledge gap on how people read tables and how different visual aids affect people's reading of tables. In this work, we seek to address this vacuum through a controlled study. We asked participants to repeatedly perform four different tasks with four table representation conditions (plain tables, tables with zebra striping, tables with cell background color encoding cell value, and tables with in-cell bars with lengths encoding cell value). We analyzed completion time, error rate, gaze-tracking data, mouse movement and participant preferences. We found that color and bar encodings help for finding maximum values. 
For a more complex task (comparison of proportional differences) color and bar helped less than zebra striping. We also characterize typical human behavior for the four tasks. These findings inform the design of tables and research directions for improving presentation of data in tabular form.","accessible_pdf":false,"authors":[{"affiliations":["University of Victoria, Victoria, Canada"],"email":"yongfengji@uvic.ca","is_corresponding":false,"name":"YongFeng Ji"},{"affiliations":["University of Victoria, Victoria, Canada"],"email":"cperin@uvic.ca","is_corresponding":true,"name":"Charles Perin"},{"affiliations":["University of Victoria, Victoria, Canada"],"email":"nacenta@gmail.com","is_corresponding":false,"name":"Miguel A Nacenta"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1288","image_caption":"We study the effects of one visual feature (zebra stripping, top right) and two visual encodings (color shading, bottom left, and data bars, bottom right) on the readability of numeric data tables, compared to a plain table (top left).","keywords":["Data Table, Visual Encoding, Visual Aid, Gaze Analysis, Zebra, Data Bars, Tabular Representations."],"open_access_supplemental_link":"https://osf.io/jfg3h/?view_only=f064cff189c4440299a3c3b10ddab232","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://doi.org/10.31219/osf.io/2t3sc","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1288/v-full-1288_Preview.mp4?token=WrBIX_ELfQf5Wq5HyVgAdJ-Km4gCT2hW7QnwqnMe4og&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full26","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Perception and Cognition","session_uid":"v-full","session_youtube_ff_id":"U-KVskuEvz8","session_youtube_ff_link":"https://youtu.be/U-KVskuEvz8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/gaAm2v-ENKA&t=0h13m20s","sessions":["Perception and Cognition"],"time_stamp":"2024-10-16T16:12:00Z","title":"The Effect of Visual Aids on Reading Numeric Data Tables","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1291","abstract":"Emotion is an important factor to consider when designing visualizations as it can impact the amount of trust viewers place in a visualization, how well they can retrieve information and understand the underlying data, and how much they engage with or connect to a visualization. We conducted five crowdsourced experiments to quantify the effects of color, chart type, data trend, data variability and data density on emotion (measured through self-reported arousal and valence). Results from our experiments show that there are multiple design elements which influence the emotion induced by a visualization and, more surprisingly, that certain data characteristics influence the emotion of viewers even when the data has no meaning. 
In light of these findings, we offer guidelines on how to use color, scale, and chart type to counterbalance and emphasize the emotional impact of immutable data characteristics.","accessible_pdf":false,"authors":[{"affiliations":["University of Waterloo, Waterloo, Canada","University of Victoria, Victoria, Canada"],"email":"cartergblair@gmail.com","is_corresponding":false,"name":"Carter Blair"},{"affiliations":["University of Victoria, Victoria, Canada","Delft University of Technology, Delft, Netherlands"],"email":"xiyao.wang23@gmail.com","is_corresponding":false,"name":"Xiyao Wang"},{"affiliations":["University of Victoria, Victoria, Canada"],"email":"cperin@uvic.ca","is_corresponding":true,"name":"Charles Perin"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1291","image_caption":"We quantify through five studies the effects of color (Study 1 and Study 2), chart type (Study 3, Study 4, and Study 5), data trend (Study 2 and Study 3), data variance (Study 4), and data density (Study 5) on emotion (measured through arousal and valence ratings using the Self-Assessment Manikin scale).","keywords":["Affect, Data Visualization, Emotion, Quantitative Study"],"open_access_supplemental_link":"https://osf.io/ywjs4/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://doi.org/10.48550/arXiv.2407.18427","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1291/v-full-1291_Preview.mp4?token=libWQ7VS7pdmjYQyKZ3-lnWX54SUtumA-g-Tno70Egk&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full26","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Perception and Cognition","session_uid":"v-full","session_youtube_ff_id":"Hht8iAtJ40w","session_youtube_ff_link":"https://youtu.be/Hht8iAtJ40w","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/gaAm2v-ENKA&t=0h25m35s","sessions":["Perception and Cognition"],"time_stamp":"2024-10-16T16:24:00Z","title":"Quantifying Emotional Responses to Immutable Data Characteristics and Designer Choices in Data Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1480","abstract":"We propose the notion of attention-aware visualizations (AAVs) that track the user\u2019s perception of a visual representation over time and feed this information back to the visualization. Such context awareness is particularly useful for ubiquitous and immersive analytics where knowing which embedded visualizations the user is looking at can be used to make visualizations react appropriately to the user\u2019s attention: for example, by highlighting data the user has not yet seen. We can separate the approach into three components: (1) measuring the user\u2019s gaze on a visualization and its parts; (2) tracking the user\u2019s attention over time; and (3) reactively modifying the visual representation based on the current attention metric. 
In this paper, we present two separate implementations of AAV: a 2D data-agnostic method for web-based visualizations that can use an embodied eyetracker to capture the user\u2019s gaze, and a 3D data-aware one that uses the stencil buffer to track the visibility of each individual mark in a visualization. Both methods provide similar mechanisms for accumulating attention over time and changing the appearance of marks in response. We also present results from a qualitative evaluation studying visual feedback and triggering mechanisms for capturing and revisualizing attention.","accessible_pdf":false,"authors":[{"affiliations":["Aarhus University, Aarhus, Denmark"],"email":"arvind@cs.au.dk","is_corresponding":false,"name":"Arvind Srinivasan"},{"affiliations":["Aarhus University, Aarhus N, Denmark"],"email":"johannes@ellemose.eu","is_corresponding":false,"name":"Johannes Ellemose"},{"affiliations":["Bangor University, Bangor, United Kingdom"],"email":"p.butcher@bangor.ac.uk","is_corresponding":false,"name":"Peter W. S. Butcher"},{"affiliations":["Bangor University, Bangor, United Kingdom"],"email":"p.ritsos@bangor.ac.uk","is_corresponding":false,"name":"Panagiotis D. Ritsos"},{"affiliations":["Aarhus University, Aarhus, Denmark"],"email":"elm@cs.au.dk","is_corresponding":false,"name":"Niklas Elmqvist"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1480","image_caption":"This image illustrates various Attention-aware re-visualization techniques that adapt based on user attention in both 3D and 2D spaces. The left side of the image focuses on our \u201cData Aware 3D\u201d implementation applying GPU Color Picking, featuring heatmaps and desaturation techniques that respond to user orientation, rotation, and location within a 3D environment. The right side displays our \u201cData Agnostic 2D\u201d implementation applying a Picture Framing Metaphor, highlighting how user attention, tracked through gaze, pointer, and keyboard input, shapes different frames like bar, area, and heat maps. 
These revisualizations that adjust dynamically to emphasize areas of interest based on cumulative attention were then qualitatively evaluated across different triggering mechanisms.","keywords":["Attention tracking, eyetracking, immersive analytics, ubiquitous analytics, post-WIMP interaction"],"open_access_supplemental_link":"https://osf.io/8mfhp/","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2404.10732","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1480/v-full-1480_Preview.mp4?token=6hU4GabNxurQqMyFUGJ3MOpoqLwws2FyOL0qkKNd3Uk&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1480/v-full-1480_Preview.srt?token=YWRI43FlHCRbJh6Gvl67T1d1OK9jEOT80_8LNGHH1GY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full26","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Perception and Cognition","session_uid":"v-full","session_youtube_ff_id":"cDGkQpk85yw","session_youtube_ff_link":"https://youtu.be/cDGkQpk85yw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/gaAm2v-ENKA&t=1h2m25s","sessions":["Perception and Cognition"],"time_stamp":"2024-10-16T17:00:00Z","title":"Attention-Aware Visualization: Tracking and Responding to User Perception Over Time","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1638","abstract":"Probability density function (PDF) curves are among the few charts on a Cartesian coordinate system that are commonly presented without y-axes. This design decision may be due to the lack of relevance of vertical scaling in normal PDFs. In fact, as long as two normal PDFs have the same means and standard deviations (SDs), they can be scaled to occupy different amounts of vertical space while still remaining statistically identical. Because unfixed PDF height increases as SD decreases, visualization designers may find themselves tempted to vertically shrink low-SD PDFs to avoid occlusion or save white space in their figures. Although irregular vertical scaling has been explored in bar and line charts, the visualization community has yet to investigate how this visual manipulation may affect reader comparisons of PDFs. In this paper, we present two preregistered experiments (n = 600, n = 401) that systematically demonstrate that vertical scaling can lead to misinterpretations of PDFs. We also test visual interventions to mitigate misinterpretation. In some contexts, we find including a y-axis can help reduce this effect. Overall, we find that keeping vertical scaling consistent, and therefore maintaining equal pixel areas under PDF curves, results in the highest likelihood of accurate comparisons. Our findings provide insights into the impact of vertical scaling on PDFs, and reveal the complicated nature of proportional area comparisons.","accessible_pdf":true,"authors":[{"affiliations":["Northeastern University, Boston, United States"],"email":"racquel.fygenson@gmail.com","is_corresponding":true,"name":"Racquel Fygenson"},{"affiliations":["Northeastern University, Boston, United States"],"email":"l.padilla@northeastern.edu","is_corresponding":false,"name":"Lace M. 
Padilla"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1638","image_caption":"When showing multiple probability density function (PDF) plots, it can be compelling to shrink plots with small standard deviations that have tall peaks. This compression may save space and make figures look nicer, but could this compression impact reader comprehension? In this paper, we compare the impact of \"squishing\" PDF plots and find reader comparison of plots with different vertical scales is lower than that of plots with the same vertical scale. ","keywords":["visualization, probability density function, uncertainty, vertical scaling, perception, area chart"],"open_access_supplemental_link":"https://osf.io/7k5un/","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/w3dgq","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1638/v-full-1638_Preview.mp4?token=vDxDHiVPykap408m44TBUUDPrz6zAmggZ2fhZxGb1sI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1638/v-full-1638_Preview.srt?token=SrQKxfxzDSTFwU5Cx8rPsqLwzeyUJn5189JlI0wpB7I&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full26","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Perception and Cognition","session_uid":"v-full","session_youtube_ff_id":"nHx017A7OcI","session_youtube_ff_link":"https://youtu.be/nHx017A7OcI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/gaAm2v-ENKA&t=0h0m20s","sessions":["Perception and Cognition"],"time_stamp":"2024-10-16T16:00:00Z","title":"The Impact of Vertical Scaling on Normal Probability Density Function Plots","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233336588","abstract":"This article explores how the ability to recall information in data visualizations depends on the presentation technology. Participants viewed 10 Isotype visualizations on a 2D screen, in 3D, in Virtual Reality (VR) and in Mixed Reality (MR). To provide a fair comparison between the three 3D conditions, we used LIDAR to capture the details of the physical rooms, and used this information to create our textured 3D models. For all environments, we measured the number of visualizations recalled and their order (2D) or spatial location (3D, VR, MR). We also measured the number of syntactic and semantic features recalled. Results of our study show increased recall and greater richness of data understanding in the MR condition. Not only did participants recall more visualizations and ordinal/spatial positions in MR, but they also remembered more details about graph axes and data mappings, and more information about the shape of the data. 
We discuss how differences in the spatial and kinesthetic cues provided in these different environments could contribute to these results, and reasons why we did not observe comparable performance in the 3D and VR conditions.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Christophe Hurter"},{"affiliations":"","email":"","is_corresponding":false,"name":"Bernice Rogowitz"},{"affiliations":"","email":"","is_corresponding":false,"name":"Guillaume Truong"},{"affiliations":"","email":"","is_corresponding":false,"name":"Tiffany Andry"},{"affiliations":"","email":"","is_corresponding":false,"name":"Hugo Romat"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ludovic Gardy"},{"affiliations":"","email":"","is_corresponding":false,"name":"Fereshteh Amini"},{"affiliations":"","email":"","is_corresponding":false,"name":"Nathalie Henry Riche"}],"award":"","doi":"10.1109/TVCG.2023.3336588","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233336588","image_caption":"In this study, inspired by the memory palace technique, we explore how different presentation technologies impact the recall of data, specifically using Isotypes. ","keywords":["Data visualization, Three-dimensional displays, Virtual reality, Mixed reality, Electronic mail, Syntactics, Semantics"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233336588/v-tvcg-20233336588_Preview.mp4?token=NXF49TRW3Iw2249Btsx68oteEXEikPfeZHc82cpwHnA&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full26","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Perception and Cognition","session_uid":"v-tvcg","session_youtube_ff_id":"grzXRIstvMk","session_youtube_ff_link":"https://youtu.be/grzXRIstvMk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/gaAm2v-ENKA&t=0h50m0s","sessions":["Perception and Cognition"],"time_stamp":"2024-10-16T16:48:00Z","title":"Memory Recall for Data Visualizations in Mixed Reality, Virtual Reality, 3D, and 2D","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243372620","abstract":"Small multiples are a popular visualization method, displaying different views of a dataset using multiple frames, often with the same scale and axes. However, there is a need to address their potential constraints, especially in the context of human cognitive capacity limits. These limits dictate the maximum information our mind can process at once. We explore the issue of capacity limitation by testing competing theories that describe how the number of frames shown in a display, the scale of the frames, and time constraints impact user performance with small multiples of line charts in an energy grid scenario. In two online studies (Experiment 1 n = 141 and Experiment 2 n = 360) and a follow-up eye-tracking analysis (n=5),we found a linear decline in accuracy with increasing frames across seven tasks, which was not fully explained by differences in frame size, suggesting visual search challenges. 
Moreover, the studies demonstrate that highlighting specific frames can mitigate some visual search difficulties but, surprisingly, not eliminate them. This research offers insights into optimizing the utility of small multiples by aligning them with human limitations.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Helia Hosseinpour"},{"affiliations":"","email":"","is_corresponding":false,"name":"Laura E. Matzen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Kristin M. Divis"},{"affiliations":"","email":"","is_corresponding":false,"name":"Spencer C. Castro"},{"affiliations":"","email":"","is_corresponding":false,"name":"Lace Padilla"}],"award":"","doi":"10.1109/TVCG.2024.3372620","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243372620","image_caption":"Fig. 1: Example of the small multiple stimuli used in Experiment 1 that varied in frame quantity from 2 to 70, incremented by four frames. The stimuli depicted power (in megawatts) over time (one year per frame).","keywords":["Cognition, small multiples, time-series data"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/psyarxiv/a6k8z","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243372620/v-tvcg-20243372620_Preview.mp4?token=wJKQdng-SaTIZarhp7tlUrcbG5MCiUwtGwuxwhuMdJo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243372620/v-tvcg-20243372620_Preview.srt?token=SpxX29O_iIJjG_XPF7jCMxjFQ8v7Th7nCw-bF1NFT4o&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full26","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Perception and Cognition","session_uid":"v-tvcg","session_youtube_ff_id":"i_ZRWKrK2fs","session_youtube_ff_link":"https://youtu.be/i_ZRWKrK2fs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/gaAm2v-ENKA&t=0h37m35s","sessions":["Perception and Cognition"],"time_stamp":"2024-10-16T16:36:00Z","title":"Examining Limits of Small Multiples: Frame Quantity Impacts Judgments with Line Graphs","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1214","abstract":"Graphs are often used to model relationships between entities. The identification and visualization of clusters in graphs enable insight discovery in many application areas, such as life sciences and social sciences. Force-directed graph layouts promote the visual saliency of clusters, as they bring adjacent nodes closer together, and push non-adjacent nodes apart. At the same time, matrices can effectively show clusters when a suitable row/column ordering is applied, but are less appealing to untrained users not providing an intuitive node-link metaphor. It is thus worth exploring layouts combining the strengths of the node-link metaphor and node ordering. In this work, we study the impact of node ordering on the visual saliency of clusters in orderable node-link diagrams, namely radial diagrams, arc diagrams and symmetric arc diagrams. 
Through a crowdsourced controlled experiment, we show that users can count clusters consistently more accurately, and to a large extent faster, with orderable node-link diagrams than with three state-of-the-art force-directed layout algorithms, i.e., `Linlog', `Backbone' and `sfdp'. The measured advantage is greater in the case of low cluster separability and/or low compactness. A free copy of this paper and all supplemental materials are available at https://osf.io/kc3dg/.","accessible_pdf":false,"authors":[{"affiliations":["Luxembourg Institute of Science and Technology, Esch-sur-Alzette, Luxembourg"],"email":"nora.alnaami@list.lu","is_corresponding":false,"name":"Nora Al-Naami"},{"affiliations":["Luxembourg Institute of Science and Technology, Belvaux, Luxembourg"],"email":"nicolas.medoc@list.lu","is_corresponding":false,"name":"Nicolas Medoc"},{"affiliations":["Uppsala University, Uppsala, Sweden"],"email":"matteo.magnani@it.uu.se","is_corresponding":false,"name":"Matteo Magnani"},{"affiliations":["Luxembourg Institute of Science and Technology, Belvaux, Luxembourg"],"email":"mohammad.ghoniem@list.lu","is_corresponding":true,"name":"Mohammad Ghoniem"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1214","image_caption":"A symmetric arc diagram representing a 51-node graph extracted from the co-occurrence network of characters of \"Les Mis\u00e9rables\", the novel of Victor Hugo. The nodes are ordered according to the crossing reduction algorithm. ","keywords":["network visualization, arc diagrams, radial diagrams, cluster perception, graph seriation"],"open_access_supplemental_link":"https://osf.io/kc3dg/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://hal.science/hal-04668352","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1214/v-full-1214_Preview.mp4?token=Eq1d9_8bf1Cs8leruLVlhzqxc5uiAWsof3tZ5n9GSsE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1214/v-full-1214_Preview.srt?token=vw2UqBRc1sxfQeUX2e6GTIEpHwT9U6tNFr7atJhP8hY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full27","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Of Nodes and Networks","session_uid":"v-full","session_youtube_ff_id":"8QT8_S2C0fs","session_youtube_ff_link":"https://youtu.be/8QT8_S2C0fs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/cJNBh2zSTiU&t=0h0m34s","sessions":["Of Nodes and Networks"],"time_stamp":"2024-10-16T17:45:00Z","title":"Improved Visual Saliency of Graph Clusters with Orderable Node-Link Layouts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1483","abstract":"Egocentric networks, often visualized as node-link diagrams, portray the complex relationship (link) dynamics between an entity (node) and others. However, common analytics tasks are multifaceted, encompassing interactions among four key aspects: strength, function, structure, and content. Current node-link visualization designs may fall short, focusing narrowly on certain aspects and neglecting the holistic, dynamic nature of egocentric networks. 
To bridge this gap, we introduce SpreadLine, a novel visualization framework designed to enable the visual exploration of egocentric networks from these four aspects at the microscopic level. Leveraging the intuitive appeal of storyline visualizations, SpreadLine adopts a storyline-based design to represent entities and their evolving relationships. We further encode essential topological information in the layout and condense the contextual information in a metro map metaphor, allowing for a more engaging and effective way to explore temporal and attribute-based information. To guide our work, with a thorough review of pertinent literature, we have distilled a task taxonomy that addresses the analytical needs specific to egocentric network exploration. Acknowledging the diverse analytical requirements of users, SpreadLine offers customizable encodings to enable users to tailor the framework for their tasks. We demonstrate the efficacy and general applicability of SpreadLine through three diverse real-world case studies (disease surveillance, social media trends, and academic career evolution) and a usability study. ","accessible_pdf":true,"authors":[{"affiliations":["University of California, Davis, Davis, United States"],"email":"yskuo@ucdavis.edu","is_corresponding":true,"name":"Yun-Hsin Kuo"},{"affiliations":["University of California at Davis, Davis, United States"],"email":"dyuliu@ucdavis.edu","is_corresponding":false,"name":"Dongyu Liu"},{"affiliations":["University of California at Davis, Davis, United States"],"email":"ma@cs.ucdavis.edu","is_corresponding":false,"name":"Kwan-Liu Ma"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1483","image_caption":"SpreadLine is a visualization framework for exploring dynamic egocentric networks. It builds upon storyline visualizations to represent four network aspects: structure, strength, function, and content. Guided by a literature review, SpreadLine addresses essential analysis tasks and offers customizable encodings to meet diverse user needs. 
This figure presents an example of SpreadLine showing public reaction to a significant event.","keywords":["egocentric network, network analysis, design study, storyline visualization, visual exploration, metaphor"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1483/v-full-1483_Preview.mp4?token=ytXWxPflKOx5y9augCJSEbmxJEVDgPNM0UIRemLg5Lo&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full27","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Of Nodes and Networks","session_uid":"v-full","session_youtube_ff_id":"N4HpqmtLsDc","session_youtube_ff_link":"https://youtu.be/N4HpqmtLsDc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/cJNBh2zSTiU&t=0h26m40s","sessions":["Of Nodes and Networks"],"time_stamp":"2024-10-16T18:09:00Z","title":"SpreadLine: Visualizing Egocentric Dynamic Influence","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1809","abstract":"Visualizing relational data is crucial for understanding complex connections between entities in social networks, political affiliations, or biological interactions. Well-known representations like node-link diagrams and adjacency matrices offer valuable insights, but their effectiveness relies on the ability to identify patterns in the underlying topological structure. Reordering strategies and layout algorithms play a vital role in the visualization process since the arrangement of nodes, edges, or cells influences the visibility of these patterns. The BioFabric visualization combines elements of node-link diagrams and adjacency matrices, leveraging the strengths of both: the visual clarity of node-link diagrams and the tabular organization of adjacency matrices. A unique characteristic of BioFabric is the possibility to reorder nodes and edges separately. This raises the question of which combination of layout algorithms best reveals certain patterns. In this paper, we discuss patterns and anti-patterns in BioFabric, such as staircases or escalators, relate them to already established patterns, and propose metrics to evaluate their quality. Based on these quality metrics, we compared combinations of well-established reordering techniques applied to BioFabric with a well-known benchmark data set. Our experiments indicate that the edge order has a stronger influence on revealing patterns than the node layout. The results show that the best combination for revealing staircases is a barycentric node layout, together with an edge order based on node indices and length. Our research contributes a first building block for many promising future research directions, which we also share and discuss. 
A free copy of this paper and all supplemental materials are available at https://osf.io/9mt8r/?view_only=b70dfbe550e3404f83059afdc60184c6","accessible_pdf":false,"authors":[{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"fuchs@dbvis.inf.uni-konstanz.de","is_corresponding":true,"name":"Johannes Fuchs"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"alexander.frings@uni-konstanz.de","is_corresponding":false,"name":"Alexander Frings"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"maria-viktoria.heinle@uni-konstanz.de","is_corresponding":false,"name":"Maria-Viktoria Heinle"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"keim@uni-konstanz.de","is_corresponding":false,"name":"Daniel Keim"},{"affiliations":["University of Konstanz, Konstanz, Germany","TU Wien, Vienna, Austria"],"email":"sara.di-bartolomeo@uni-konstanz.de","is_corresponding":false,"name":"Sara Di Bartolomeo"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1809","image_caption":"The same synthetic data is visualized with BioFabric. The edge order has a huge influence on the appearance of patterns. A random edge order shows no topological structure, whereas our degreecending technique reveals three staircases and one path.","keywords":["Network Visualization, Graph Drawing, Graph Layout Algorithms, BioFabric, Graph Motif"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1809/v-full-1809_Preview.mp4?token=sYnN8_S710yTFOr4sy8fpyeSUYtQ5sxjnLXbRBwWYls&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1809/v-full-1809_Preview.srt?token=bMkBvuiBg59LNAB_X3tZa5xdzkXahJe4Z42ye7nZqwA&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full27","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Of Nodes and Networks","session_uid":"v-full","session_youtube_ff_id":"z5Loo1vtnXg","session_youtube_ff_link":"https://youtu.be/z5Loo1vtnXg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/cJNBh2zSTiU&t=0h12m40s","sessions":["Of Nodes and Networks"],"time_stamp":"2024-10-16T17:57:00Z","title":"Quality Metrics and Reordering Strategies for Revealing Patterns in BioFabric Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1874","abstract":"A layered graph is an important category of graph in which every node is assigned to a layer, and layers are drawn as parallel or radial lines. They are commonly used to display temporal data or hierarchical graphs. Previous research has demonstrated that minimizing edge crossings is the most important criterion to consider when looking to improve the readability of such graphs. While heuristic approaches exist for crossing minimization, we are interested in optimal approaches to the problem that prioritize human readability over computational scalability. We aim to improve the usefulness and applicability of such optimal methods by understanding and improving their scalability to larger graphs. 
This paper categorizes and evaluates the state-of-the-art linear programming formulations for exact crossing minimization and describes nine new and existing techniques that could plausibly accelerate the optimization algorithm. Through a computational evaluation, we explore each technique's effect on calculation time and how the techniques assist or inhibit one another, allowing researchers and practitioners to adapt them to the characteristics of their graphs. Our best-performing techniques yielded a median improvement of 2.5\u201317x depending on the solver used, giving us the capability to create optimal layouts faster and for larger graphs. We provide an open-source implementation of our methodology in Python, where users can pick which combination of techniques to enable according to their use case. A free copy of this paper and all supplemental materials, datasets used, and source code are available at https://osf.io/5vq79.","accessible_pdf":true,"authors":[{"affiliations":["Northeastern University, Boston, United States"],"email":"wilson.conn@northeastern.edu","is_corresponding":true,"name":"Connor Wilson"},{"affiliations":["Northeastern University, Boston, United States"],"email":"eduardopuertac@gmail.com","is_corresponding":false,"name":"Eduardo Puerta"},{"affiliations":["northeastern university, Boston, United States"],"email":"turokhunter@gmail.com","is_corresponding":false,"name":"Tarik Crnovrsanin"},{"affiliations":["University of Konstanz, Konstanz, Germany","Northeastern University, Boston, United States"],"email":"sara.di-bartolomeo@uni-konstanz.de","is_corresponding":false,"name":"Sara Di Bartolomeo"},{"affiliations":["Northeastern University, Boston, United States"],"email":"c.dunne@northeastern.edu","is_corresponding":false,"name":"Cody Dunne"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1874","image_caption":"In this work, we characterize nine techniques to improve the performance of an integer linear programming (ILP) formulation and empirically test their improvement. We call these switches since they can be toggled and combined. Here, the behavior of one of the switches, symmetry breaking, is illustrated. This technique removes redundancy in the model by fixing one of the decision variables. 
We find that use of the switch almost invariably improves the speed of the optimization solver.","keywords":["Integer linear programming, layered graph drawing, layered network visualization, crossing minimization, edge crossings"],"open_access_supplemental_link":"https://osf.io/5vq79","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1874/v-full-1874_Preview.mp4?token=G85Efq1rI0eSTL1MK_drwz50PwtWDcSI-zBmJYnTLUE&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full27","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Of Nodes and Networks","session_uid":"v-full","session_youtube_ff_id":"wIQnahaRsKk","session_youtube_ff_link":"https://youtu.be/wIQnahaRsKk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/cJNBh2zSTiU&t=1h2m40s","sessions":["Of Nodes and Networks"],"time_stamp":"2024-10-16T18:45:00Z","title":"Evaluating and extending speedup techniques for optimal crossing minimization in layered graph drawings","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233310019","abstract":"The dynamic network visualization design space consists of two major dimensions: network structural and temporal representation. As more techniques are developed and published, a clear need for evaluation and experimental comparisons between them emerges. Most studies explore the temporal dimension and diverse interaction techniques supporting the participants, focusing on a single structural representation. Empirical evidence about performance and preference for different visualization approaches is scattered over different studies, experimental settings, and tasks. This paper aims to comprehensively investigate the dynamic network visualization design space in two evaluations. First, a controlled study assessing participants' response times, accuracy, and preferences for different combinations of network structural and temporal representations on typical dynamic network exploration tasks, with and without the support of standard interaction methods. Second, the best-performing combinations from the first study are enhanced based on participants' feedback and evaluated in a heuristic-based qualitative study with visualization experts on a real-world network. Our results highlight node-link with animation and playback controls as the best-performing combination and the most preferred based on ratings. Matrices achieve similar performance to node-link in the first study but have considerably lower scores in our second evaluation. 
Similarly, juxtaposition exhibits evident scalability issues in more realistic analysis contexts.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Velitchko Filipov"},{"affiliations":"","email":"","is_corresponding":false,"name":"Alessio Arleo"},{"affiliations":"","email":"","is_corresponding":false,"name":"Markus B\u00f6gl"},{"affiliations":"","email":"","is_corresponding":false,"name":"Silvia Miksch"}],"award":"","doi":"10.1109/TVCG.2023.3310019","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233310019","image_caption":"This study evaluates the effectiveness of various network structural and temporal encodings in dynamic network visualization, focusing on Node-Link diagrams and Adjacency Matrices. Through two comprehensive studies, we assessed the accuracy, response times, and user preferences for different visualization techniques, including Juxtaposition, Superimposition, Auto-Animation, and Animation with Playback Controls. Our findings highlight the strengths and limitations of each approach, providing critical insights for optimizing dynamic network analysis and designing with tasks in mind. The figure illustrates key methods: Network structural and temporal encodings\u2014Juxtaposition (A,D), Superimposition (B,E), and Animation with Playback Controls (C,F).","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233310019/v-tvcg-20233310019_Preview.mp4?token=02r77JEMELRLQ63amQMGJlcBzC5lAQwyWngc-6ZJD6Y&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233310019/v-tvcg-20233310019_Preview.srt?token=pFAzbhYPtALNNUdKtVzOnFtRyLq7V2pk4xP5fjJgUCc&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full27","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Of Nodes and Networks","session_uid":"v-tvcg","session_youtube_ff_id":"kvHH763cMkU","session_youtube_ff_link":"https://youtu.be/kvHH763cMkU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/cJNBh2zSTiU&t=0h38m15s","sessions":["Of Nodes and Networks"],"time_stamp":"2024-10-16T18:21:00Z","title":"On Network Structural and Temporal Encodings: A Space and Time Odyssey","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233337396","abstract":"Partitioning a dynamic network into subsets (i.e., snapshots) based on disjoint time intervals is a widely used technique for understanding how structural patterns of the network evolve. However, selecting an appropriate time window (i.e., slicing a dynamic network into snapshots) is challenging and time-consuming, often involving a trial-and-error approach to investigating underlying structural patterns. To address this challenge, we present MoNetExplorer, a novel interactive visual analytics system that leverages temporal network motifs to provide recommendations for window sizes and support users in visually comparing different slicing results. 
MoNetExplorer provides a comprehensive analysis based on window size, including (1) a temporal overview to identify the structural information, (2) temporal network motif composition, and (3) node-link-diagram-based details to enable users to identify and understand structural patterns at various temporal resolutions. To demonstrate the effectiveness of our system, we conducted a case study with network researchers using two real-world dynamic network datasets. Our case studies show that the system effectively supports users to gain valuable insights into the temporal and structural aspects of dynamic networks.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Seokweon Jung"},{"affiliations":"","email":"","is_corresponding":false,"name":"DongHwa Shin"},{"affiliations":"","email":"","is_corresponding":false,"name":"Hyeon Jeon"},{"affiliations":"","email":"","is_corresponding":false,"name":"Kiroong Choe"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jinwook Seo"}],"award":"","doi":"10.1109/TVCG.2023.3337396","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233337396","image_caption":"MoNetExplorer is a visual analytics system designed to support the selection of appropriate window sizes for dynamic network analysis and provides a temporal and structural analysis of snapshots that are sliced according to window sizes. The system is composed of five linked components. (A) Slicing Navigation View supports the beginning of the workflow: selection of snapshot window sizes according to measures based on Temporal Network Motifs (TNM). (B) Temporal Measure View and (C) Temporal Status View enable validation of the quality of snapshots and identification of temporal patterns. (D) Motif Composition View visualizes the composition of temporal network motifs. 
(E) Bottom-level details of network structure are shown in Network View.","keywords":["Visual analytics, Measurement, Size measurement, Windows, Time measurement, Data visualization, Task analysis, Visual analytics, Dynamic networks, Temporal network motifs, Interactive network slicing"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233337396/v-tvcg-20233337396_Preview.mp4?token=0KbM6aJwuxs0G0Rpw-vPJ-2xQQAfINseTMtQMJ2zaJg&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233337396/v-tvcg-20233337396_Preview.srt?token=TOU6HuziX7iYDRKMcEVe6c9fvmCcyt7m6ExjtTavEJs&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full27","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Of Nodes and Networks","session_uid":"v-tvcg","session_youtube_ff_id":"8ShT_DsTgyQ","session_youtube_ff_link":"https://youtu.be/8ShT_DsTgyQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/cJNBh2zSTiU&t=0h50m45s","sessions":["Of Nodes and Networks"],"time_stamp":"2024-10-16T18:33:00Z","title":"MoNetExplorer: A Visual Analytics System for Analyzing Dynamic Networks with Temporal Network Motifs","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1147","abstract":"Large Language Models (LLMs) like GPT-4 which support multimodal input (i.e., prompts containing images in addition to text) have immense potential to advance visualization research. However, many questions exist about the visual capabilities of such models, including how well they can read and interpret visually represented data. In our work, we address this question by evaluating the GPT-4 multimodal LLM using a suite of task sets meant to assess the model's visualization literacy. The task sets are based on existing work in the visualization community addressing both automated chart question answering and human visualization literacy across multiple settings. Our assessment finds that GPT-4 can perform tasks such as recognizing trends and extreme values, and also demonstrates some understanding of visualization design best-practices. By contrast, GPT-4 struggles with simple value retrieval when not provided with the original dataset, lacks the ability to reliably distinguish between colors in charts, and occasionally suffers from hallucination and inconsistency. We conclude by reflecting on the model's strengths and weaknesses as well as the potential utility of models like GPT-4 for future visualization research. 
We also release all code, stimuli, and results for the task sets at the following link: https://doi.org/10.17605/OSF.IO/F39J6","accessible_pdf":true,"authors":[{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"abendeck3@gatech.edu","is_corresponding":true,"name":"Alexander Bendeck"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"john.stasko@cc.gatech.edu","is_corresponding":false,"name":"John Stasko"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1147","image_caption":"Large vision-language models like GPT-4V are extremely powerful, but we have little understanding of their visualization literacy capabilities. We conduct an empirical evaluation of the GPT-4V model on four tasks from the visualization literature related to visualization literacy: (1) the Visualization Literacy Assessment Test (VLAT); (2) a chart question answering dataset; (3) a set of questions about deceptive visualization design choices; and (4) a set of questions about visualizations with misaligned titles. We also release all materials and code to support future research.","keywords":["Visualization Literacy, Large Language Models, Natural Language"],"open_access_supplemental_link":"https://doi.org/10.17605/OSF.IO/F39J6","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1147/v-full-1147_Preview.mp4?token=RSeiBOIa3FraoklgtgzwYzc1xP-0gRTZoGxLbeHvDUg&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1147/v-full-1147_Preview.srt?token=lZ1QakQa_RMk3QXJ6EXcMjlynz8iPpX8pLJ9QRINsfw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full28","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Human and Machine Visualization Literacy","session_uid":"v-full","session_youtube_ff_id":"Nr30W716yjI","session_youtube_ff_link":"https://youtu.be/Nr30W716yjI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/W3Vrrxo2w74&t=0h48m23s","sessions":["Human and Machine Visualization Literacy"],"time_stamp":"2024-10-18T13:18:00Z","title":"An Empirical Evaluation of the GPT-4 Multimodal Language Model on Visualization Literacy Tasks","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1275","abstract":"We developed and validated an instrument to measure the perceived readability in data visualization: PREVis. Researchers and practitioners can easily use this instrument as part of their evaluations to compare the perceived readability of different visual data representations. Our instrument can complement results from controlled experiments on user task performance or provide additional data during in-depth qualitative work such as design iterations when developing a new technique. Although readability is recognized as an essential quality of data visualizations, so far there has not been a unified definition of the construct in the context of visual representations. As a result, researchers often lack guidance for determining how to ask people to rate their perceived readability of a visualization. 
To address this issue, we engaged in a rigorous process to develop the first validated instrument targeted at the subjective readability of visual data representations. Our final instrument consists of 11 items across 4 dimensions: understandability, layout clarity, readability of data values, and readability of data patterns. We provide the questionnaire as a document with implementation guidelines on osf.io/9cg8j. Beyond this instrument, we contribute a discussion of how researchers have previously assessed visualization readability, and an analysis of the factors underlying perceived readability in visual data representations.","accessible_pdf":false,"authors":[{"affiliations":["LISN, Universit\u00e9 Paris Saclay, CNRS, Orsay, France","Aviz, Inria, Saclay, France"],"email":"acabouat@gmail.com","is_corresponding":true,"name":"Anne-Flore Cabouat"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"tingying.he@inria.fr","is_corresponding":false,"name":"Tingying He"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"petra.isenberg@inria.fr","is_corresponding":false,"name":"Petra Isenberg"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"tobias.isenberg@gmail.com","is_corresponding":false,"name":"Tobias Isenberg"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1275","image_caption":"PREVis is a reliable instrument that allows respondents to rate how readable they find a static data visualization across 4 dimensions: layout clarity, ease of understanding, ease of reading data features, and ease of reading data values. ","keywords":["Visualization, readability, validated instrument, perception, user experiments, empirical methods, methodology"],"open_access_supplemental_link":"https://osf.io/9cg8j","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.14908","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1275/v-full-1275_Preview.mp4?token=s-3k7ce13wMP2CJ8tVZi79RhIyDe-RpGRBaPn6usyD4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1275/v-full-1275_Preview.srt?token=jehYCBF9wLwHrXRT8HS61Er9DvdhUWKlYcYKNXGKLzE&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full28","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Human and Machine Visualization Literacy","session_uid":"v-full","session_youtube_ff_id":"SmrTAspA0PM","session_youtube_ff_link":"https://youtu.be/SmrTAspA0PM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/W3Vrrxo2w74&t=0h25m5s","sessions":["Human and Machine Visualization Literacy"],"time_stamp":"2024-10-18T12:54:00Z","title":"PREVis: Perceived Readability Evaluation for Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1318","abstract":"In this study, we address the growing issue of misleading charts, a prevalent problem that undermines the integrity of information dissemination. 
Misleading charts can distort the viewer\u2019s perception of data, leading to misinterpretations and decisions based on false information. The development of effective automatic detection methods for misleading charts is an urgent field of research. The recent advancement of multimodal Large Language Models (LLMs) has introduced a promising direction for addressing this challenge. We explored the capabilities of these models in analyzing complex charts and assessing the impact of different prompting strategies on the models\u2019 analyses. We utilized a dataset of misleading charts collected from the internet by prior research and crafted nine distinct prompts, ranging from simple to complex, to test the ability of four different multimodal LLMs in detecting over 21 different chart issues. Through three experiments\u2013from initial exploration to detailed analysis\u2013we progressively gained insights into how to effectively prompt LLMs to identify misleading charts and developed strategies to address the scalability challenges encountered as we expanded our detection range from the initial five issues to 21 issues in the final experiment. Our findings reveal that multimodal LLMs possess a strong capability for chart comprehension and critical thinking in data interpretation. There is significant potential in employing multimodal LLMs to counter misleading information by supporting critical thinking and enhancing visualization literacy. This study demonstrates the applicability of LLMs in addressing the pressing concern of misleading charts.","accessible_pdf":false,"authors":[{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"yhload@cse.ust.hk","is_corresponding":true,"name":"Leo Yu-Ho Lo"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"huamin@cse.ust.hk","is_corresponding":false,"name":"Huamin Qu"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1318","image_caption":"The paper title is \"How Good (Or Bad) Are LLMs at Detecting Misleading Visualizations?\" On the left-hand side, the LLM response correctly identified the chart as misleading and gave a relevant reason. On the right-hand side, the LLM response was incorrect and gave a wrong interpretation. 
","keywords":["Deceptive Visualization, Large Language Models, Prompt Engineering"],"open_access_supplemental_link":"https://osf.io/vx526","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.17291","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1318/v-full-1318_Preview.mp4?token=envXKktkNZwTv1_vrEuKFaxtBG3OCJrz4N0Q-JdsXgo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1318/v-full-1318_Preview.srt?token=XlCfncPmSXCwi9SwrFtEr1wrliOw30bEUuUCihgYlSg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full28","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Human and Machine Visualization Literacy","session_uid":"v-full","session_youtube_ff_id":"LYcwSpyRxR8","session_youtube_ff_link":"https://youtu.be/LYcwSpyRxR8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/W3Vrrxo2w74&t=1h0m23s","sessions":["Human and Machine Visualization Literacy"],"time_stamp":"2024-10-18T13:30:00Z","title":"How Good (Or Bad) Are LLMs in Detecting Misleading Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1422","abstract":"Visualization items\u2014factual questions about visualizations that ask viewers to accomplish visualization tasks\u2014are regularly used in the field of information visualization as educational and evaluative materials. For example, researchers of visualization literacy require large, diverse banks of items to conduct studies where the same skill is measured repeatedly on the same participants. Yet, generating a large number of high-quality, diverse items requires significant time and expertise. To address the critical need for a large number of diverse visualization items in education and research, this paper investigates the potential for large language models (LLMs) to automate the generation of multiple-choice visualization items. Through an iterative design process, we develop the VILA (Visualization Items Generated by Large LAnguage Models) pipeline, for efficiently generating visualization items that measure people\u2019s ability to accomplish visualization tasks. We use the VILA pipeline to generate 1,404 candidate items across 12 chart types and 13 visualization tasks. In collaboration with 11 visualization experts, we develop an evaluation rulebook which we then use to rate the quality of all candidate items. The result is the VILA bank of \u223c1,100 items. From this evaluation, we also identify and classify current limitations of the VILA pipeline, and discuss the role of human oversight in ensuring quality. In addition, we demonstrate an application of our work by creating a visualization literacy test, VILA-VLAT, which measures people\u2019s ability to complete a diverse set of tasks on various types of visualizations; comparing it to the existing VLAT, VILA-VLAT shows moderate to high convergent validity (R = 0.70). Lastly, we discuss the application areas of the VILA pipeline and the VILA bank and provide practical recommendations for their use. 
All supplemental materials are available at https://osf.io/ysrhq/.","accessible_pdf":false,"authors":[{"affiliations":["Northwestern University, Evanston, United States"],"email":"yuancui2025@u.northwestern.edu","is_corresponding":true,"name":"Yuan Cui"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"wanqian.ge@northwestern.edu","is_corresponding":false,"name":"Lily W. Ge"},{"affiliations":["Worcester Polytechnic Institute, Worcester, United States"],"email":"yding5@wpi.edu","is_corresponding":false,"name":"Yiren Ding"},{"affiliations":["Worcester Polytechnic Institute, Worcester, United States"],"email":"ltharrison@wpi.edu","is_corresponding":false,"name":"Lane Harrison"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"fumeng.p.yang@gmail.com","is_corresponding":false,"name":"Fumeng Yang"},{"affiliations":["Northwestern University, Chicago, United States"],"email":"matthew.kay@gmail.com","is_corresponding":false,"name":"Matthew Kay"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1422","image_caption":"Overview of this paper: developing the VILA pipeline, evaluating the candidate bank, and demonstrating a potential application\u2014 the new VILA-VLAT visualization literacy test.","keywords":["Visualization Items, Large Language Models, Visualization Literacy Assessment"],"open_access_supplemental_link":"https://osf.io/ysrhq/","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/ysrhq/","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1422/v-full-1422_Preview.mp4?token=yDyFYRJ4pisyltZsmx0eDNmY6u8zITqwq3wrjK1BCnU&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1422/v-full-1422_Preview.srt?token=4IZvyXEtEwm3VyE5Dn48x1WT8TKkCOZep1VTeEXEFMo&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full28","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Human and Machine Visualization Literacy","session_uid":"v-full","session_youtube_ff_id":"dA4Z80m5Rzs","session_youtube_ff_link":"https://youtu.be/dA4Z80m5Rzs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/W3Vrrxo2w74&t=0h36m7s","sessions":["Human and Machine Visualization Literacy"],"time_stamp":"2024-10-18T13:06:00Z","title":"Promises and Pitfalls: Using Large Language Models to Generate Visualization Items","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1738","abstract":" As a step towards improving visualization literacy, this work investigates how students approach reading visualizations differently after taking a university-level visualization course. We asked students to verbally walk through their process of making sense of unfamiliar visualizations, and conducted a qualitative analysis of these walkthroughs. Our qualitative analysis found that after taking a visualization course, students engaged with visualizations in more sophisticated ways: they were more likely to exhibit design empathy by thinking critically about the tradeoffs behind why a chart was designed in a particular way, and were better able to deconstruct a chart to make sense of it. 
We also gave students a quantitative assessment of visualization literacy and found no evidence of scores improving after the class, likely because the test we used focused on a different set of skills than those emphasized in visualization classes. While current measurement instruments for visualization literacy are useful, we propose developing standardized assessments for additional aspects of visualization literacy, such as deconstruction and design empathy. We also suggest that these additional aspects could be incorporated more explicitly in visualization courses. All supplemental materials are available at https://osf.io/w5pum/.","accessible_pdf":false,"authors":[{"affiliations":["Northwestern University, Evanston, United States"],"email":"maryam.hedayati@u.northwestern.edu","is_corresponding":true,"name":"Maryam Hedayati"},{"affiliations":["Northwestern University, Chicago, United States"],"email":"matthew.kay@gmail.com","is_corresponding":false,"name":"Matthew Kay"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1738","image_caption":"Participants were randomly assigned to one of two groups. During each study session, they completed the VLAT and a walkthrough of two unfamiliar visualizations. The visualizations they saw in each session were determined by the group they were assigned to. ","keywords":["visualization literacy, visualization pedagogy, graph comprehension, visualization expertise"],"open_access_supplemental_link":"https://osf.io/w5pum/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/kg3am","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1738/v-full-1738_Preview.mp4?token=HOTx4l7qDky3PAIN5UrGhIlD6SAGmJngXtES3wC48p8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1738/v-full-1738_Preview.srt?token=EDSSAynuvqm7nWks6RBza0id9hZnB1TcnMXF4CUr2f8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full28","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Human and Machine Visualization Literacy","session_uid":"v-full","session_youtube_ff_id":"j5kScTwQeNk","session_youtube_ff_link":"https://youtu.be/j5kScTwQeNk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/W3Vrrxo2w74&t=0h12m19s","sessions":["Human and Machine Visualization Literacy"],"time_stamp":"2024-10-18T12:42:00Z","title":"What University Students Learn In Visualization Classes","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243413195","abstract":"With the growing complexity and volume of data, visualizations have become more intricate, often requiring advanced techniques to convey insights. These complex charts are prevalent in everyday life, and individuals who lack knowledge in data visualization may find them challenging to understand. This paper investigates using Large Language Models (LLMs) to help users with low data literacy understand complex visualizations. While previous studies focus on text interactions with users, we noticed that visual cues are also critical for interpreting charts. 
We introduce an LLM application that supports both text and visual interaction for guiding chart interpretation. Our study with 26 participants revealed that the in-situ support effectively assisted users in interpreting charts and enhanced learning by addressing specific chart-related questions and encouraging further exploration. Visual communication allowed participants to convey their interests straightforwardly, eliminating the need for textual descriptions. However, the LLM assistance led users to engage less with the system, resulting in fewer insights from the visualizations. This suggests that users, particularly those with lower data literacy and motivation, may have over-relied on the LLM agent. We discuss opportunities for deploying LLMs to enhance visualization literacy while emphasizing the need for a balanced approach.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Kiroong Choe"},{"affiliations":"","email":"","is_corresponding":false,"name":"Chaerin Lee"},{"affiliations":"","email":"","is_corresponding":false,"name":"Soohyun Lee"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jiwon Song"},{"affiliations":"","email":"","is_corresponding":false,"name":"Aeri Cho"},{"affiliations":"","email":"","is_corresponding":false,"name":"Nam Wook Kim"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jinwook Seo"}],"award":"","doi":"10.1109/TVCG.2024.3413195","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243413195","image_caption":"Our system allows users to interact with charts using both text and visual inputs. Users can ask questions or share visualizations, and the system will provide the current chart annotations to the LLM agent. The agent can then propose new annotations and suggest follow-up questions for deeper analysis.","keywords":["Visualization literacy, Large language model, Visual communication"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243413195/v-tvcg-20243413195_Preview.mp4?token=VRK1LBdTylEoAdQv7uQncr9J8VSmYIY9CjKfByD6vto&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243413195/v-tvcg-20243413195_Preview.srt?token=EeqHuoJJEOM3CmdAysIVV8s6o8tsaqPqAnOTVWdOZus&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full28","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Human and Machine Visualization Literacy","session_uid":"v-tvcg","session_youtube_ff_id":"oF7pAKfnhxo","session_youtube_ff_link":"https://youtu.be/oF7pAKfnhxo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/W3Vrrxo2w74&t=0h0m45s","sessions":["Human and Machine Visualization Literacy"],"time_stamp":"2024-10-18T12:30:00Z","title":"Enhancing Data Literacy On-demand: LLMs as Guides for Novices in Chart Interpretation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1140","abstract":"Written language is a useful tool for non-visual creative activities like composing essays and planning searches. 
This paper investigates the integration of written language into the visualization design process. We create the idea of a 'writing rudder,' which acts as a guiding force or strategy for the designer. Via an interview study of 24 working visualization designers, we first established that only a minority of participants systematically use writing to aid in design. A second study with 15 visualization designers examined four different variants of written rudders: asking questions, stating conclusions, composing a narrative, and writing titles. Overall, participants had a positive reaction; designers recognized the benefits of explicitly writing down components of the design and indicated that they would use this approach in future design work. More specifically, two approaches - writing questions and writing conclusions/takeaways - were seen as beneficial across the design process, while writing narratives showed promise mainly for the creation stage. Although concerns around potential bias during data exploration were raised, participants also discussed strategies to mitigate such concerns. This paper contributes to a deeper understanding of the interplay between language and visualization, and proposes a straightforward, lightweight addition to the visualization design process.","accessible_pdf":false,"authors":[{"affiliations":["UC Berkeley, Berkeley, United States"],"email":"chase_stokes@berkeley.edu","is_corresponding":true,"name":"Chase Stokes"},{"affiliations":["Self, Berkeley, United States"],"email":"clarahu@berkeley.edu","is_corresponding":false,"name":"Clara Hu"},{"affiliations":["UC Berkeley, Berkeley, United States"],"email":"hearst@berkeley.edu","is_corresponding":false,"name":"Marti Hearst"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1140","image_caption":"Main findings from two interview studies. Right: number of participants who currently use writing in visualization design, and with what frequency, in each design step. Both Study 1 and Study 2 found that visualization designers rarely use writing as a concrete design step. Left: Four types of writing rudders tested in Study 2, participants' ratings of each type, and examples of participant-written rudders. 
","keywords":["Visualization, design, language, text"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.15959","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1140/v-full-1140_Preview.mp4?token=Oa5acEgF9yK70H6UaYyGjVl90iRXE9dQ_dGCjqjQ3x0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1140/v-full-1140_Preview.srt?token=m7laA4Mo72JmMkAMFTbu76ZbzbeERu_2BtU6v4TUddU&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full29","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Design Methods","session_uid":"v-full","session_youtube_ff_id":"ciCUI2ju3tM","session_youtube_ff_link":"https://youtu.be/ciCUI2ju3tM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/55Jz0Cdvl1k&t=0h0m47s","sessions":["Visualization Design Methods"],"time_stamp":"2024-10-17T16:00:00Z","title":"\"It's a Good Idea to Put It Into Words\": Writing 'Rudders' in the Initial Stages of Visualization Design","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1342","abstract":"Genomics experts rely on visualization to extract and share insights from complex and large-scale datasets. Beyond off-the-shelf tools for data exploration, there is an increasing need for platforms that aid experts in authoring customized visualizations for both exploration and communication of insights. A variety of interactive techniques have been proposed for authoring data visualizations, such as template editing, shelf configuration, natural language input, and code editors. However, it remains unclear how genomics experts create visualizations and which techniques best support their visualization tasks and needs. To address this gap, we conducted two user studies with genomics researchers: (1) semi-structured interviews (n=20) to identify the tasks, user contexts, and current visualization authoring techniques and (2) an exploratory study (n=13) using visual probes to elicit users\u2019 intents and desired techniques when creating visualizations. Our contributions include (1) a characterization of how visualization authoring is currently utilized in genomics visualization, identifying limitations and benefits in light of common criteria for authoring tools, and (2) generalizable design implications for genomics visualization authoring tools based on our findings on task- and user-specific usefulness of authoring techniques. All supplemental materials are available at https://osf.io/bdj4v/.","accessible_pdf":false,"authors":[{"affiliations":["Eindhoven University of Technology, Eindhoven, Netherlands"],"email":"a.v.d.brandt@tue.nl","is_corresponding":true,"name":"Astrid van den Brandt"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"sehi_lyi@hms.harvard.edu","is_corresponding":false,"name":"Sehi L'Yi"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"huyen_nguyen@hms.harvard.edu","is_corresponding":false,"name":"Huyen N. 
Nguyen"},{"affiliations":["Eindhoven University of Technology, Eindhoven, Netherlands"],"email":"a.vilanova@tue.nl","is_corresponding":false,"name":"Anna Vilanova"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"nils@hms.harvard.edu","is_corresponding":false,"name":"Nils Gehlenborg"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1342","image_caption":"Composite illustration summarizing key results from the two user studies. In Study 1 (n=20), we identified five personas based on interviews, characterized by three dimensions: focus, automation, and audience. In Study 2 (n=13), we collected user preferences across eight tasks (T1--T8) for six common authoring techniques: code-based, example-based, natural language input (NLI), shelf configuration, template-based, and visualization-by-demonstration (VbD).","keywords":["User interviews, visual probes, visualization authoring, genomics data visualization"],"open_access_supplemental_link":"https://osf.io/bdj4v/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/6f42j","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1342/v-full-1342_Preview.mp4?token=rhgoNF1TgG9XLA_Pd4l2NUgw86TcNAfNZPBjnPYalgE&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full29","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Design Methods","session_uid":"v-full","session_youtube_ff_id":"Tw14XEoGMAk","session_youtube_ff_link":"https://youtu.be/Tw14XEoGMAk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/55Jz0Cdvl1k&t=1h4m54s","sessions":["Visualization Design Methods"],"time_stamp":"2024-10-17T17:00:00Z","title":"Understanding Visualization Authoring Techniques for Genomics Data in the Context of Personas and Tasks","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1393","abstract":"This paper discusses challenges and design strategies in responsive design for thematic maps in information visualization. Thematic maps pose a number of unique challenges for responsiveness, such as inflexible aspect ratios that do not easily adapt to varying screen dimensions, or densely clustered visual elements in urban areas becoming illegible at smaller scales. However, design guidance on how to best address these issues is currently lacking. We conducted design sessions with eight professional designers and developers of web-based thematic maps for information visualization. Participants were asked to redesign a given map for various screen sizes and aspect ratios and to describe their reasoning for when and how they adapted the design. We report general observations of practitioners\u2019 motivations, decision-making processes, and personal design frameworks. We then derive seven challenges commonly encountered in responsive maps, and 17 strategies to address them, such as repositioning elements, segmenting the map, or using alternative visualizations. We compile these challenges and strategies into an illustrated cheat sheet targeted at anyone designing or learning to design responsive maps. 
The cheat sheet is available online: responsive-vis.github.io/map-cheat-sheet. ","accessible_pdf":false,"authors":[{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"sarah.schoettler@ed.ac.uk","is_corresponding":true,"name":"Sarah Sch\u00f6ttler"},{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"uhinrich@ed.ac.uk","is_corresponding":false,"name":"Uta Hinrichs"},{"affiliations":["Inria, Bordeaux, France","University of Edinburgh, Edinburgh, United Kingdom"],"email":"bbach@inf.ed.ac.uk","is_corresponding":false,"name":"Benjamin Bach"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1393","image_caption":"Challenges and design solutions for responsive thematic mapping. On the left, seven common challenges in responsive thematic maps, such as areas and symbols being too small or overlapping, are displayed. On the right, 17 possible design solutions are displayed, for example replacing the legend with annotations, separating the map into segments, or scrolling the map.","keywords":["information visualization, responsive visualization, thematic map design"],"open_access_supplemental_link":"https://responsive-vis.github.io/map-cheat-sheet/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.20735","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1393/v-full-1393_Preview.mp4?token=lGAUq2xdhP4kfeEG8qYsYRxSmjHdL97R3mzVkkDkGVI&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full29","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Design Methods","session_uid":"v-full","session_youtube_ff_id":"mGAIwYY0AN4","session_youtube_ff_link":"https://youtu.be/mGAIwYY0AN4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/55Jz0Cdvl1k&t=0h27m18s","sessions":["Visualization Design Methods"],"time_stamp":"2024-10-17T16:24:00Z","title":"Practices and Strategies in Responsive Thematic Map Design: A Report from Design Workshops with Experts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1414","abstract":"Visualization designers (e.g., journalists or data analysts) often rely on examples to explore the space of possible designs, yet we have little insight into how examples shape data visualization design outcomes. While the effects of examples have been studied in other disciplines, such as web design or engineering, the results are not readily applicable to visualization due to inconsistencies in findings and challenges unique to visualization design. Towards bridging this gap, we conduct an exploratory experiment involving 32 data visualization designers focusing on the influence of five factors (timing, quantity, diversity, data topic similarity, and data schema similarity) on objectively measurable design outcomes (e.g., numbers of designs and idea transfers). Our quantitative analysis shows that when examples are introduced after initial brainstorming, designers curate examples with topics less similar to the dataset they are working on and produce more designs with a high variation in visualization components. 
Also, designers copy more ideas from examples with higher data schema similarities. Our qualitative analysis of participants\u2019 thought processes provides insights into why designers incorporate examples into their designs, revealing potential factors that have not been previously investigated. Finally, we discuss how our results inform how designers may use examples during design ideation as well as future research on quantifying designs and supporting example-based visualization design. All supplemental materials are available in our OSF repo.","accessible_pdf":true,"authors":[{"affiliations":["University of Maryland, College Park, United States"],"email":"hbako@umd.edu","is_corresponding":true,"name":"Hannah K. Bako"},{"affiliations":["The University of Texas at Austin, Austin, United States"],"email":"xinyi.liu@utexas.edu","is_corresponding":false,"name":"Xinyi Liu"},{"affiliations":["University of Maryland, College Park, United States"],"email":"gko1@terpmail.umd.edu","is_corresponding":false,"name":"Grace Ko"},{"affiliations":["Human Data Interaction Lab, College Park, United States"],"email":"hsong02@cs.umd.edu","is_corresponding":false,"name":"Hyemi Song"},{"affiliations":["University of Washington, Seattle, United States"],"email":"leibatt@cs.washington.edu","is_corresponding":false,"name":"Leilani Battle"},{"affiliations":["University of Maryland, College Park, United States"],"email":"leozcliu@umd.edu","is_corresponding":false,"name":"Zhicheng Liu"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1414","image_caption":"The image outlines an exploratory study investigating how the timing and properties of examples influence visualization design outcomes, highlighting key stages from task introduction to final design selection.","keywords":["data visualization, design, examples"],"open_access_supplemental_link":"https://osf.io/sbp2k/wiki/home/","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1414/v-full-1414_Preview.mp4?token=F_WE7OeLcasWgVwnGc5hrSvn98z1rvAmcvHZCJkobfA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1414/v-full-1414_Preview.srt?token=i8BLTU3xi6RHAD-FotoO67SChOFvRbcLw_ls9ARIPRM&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full29","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Design Methods","session_uid":"v-full","session_youtube_ff_id":"6Nh--7IK6fw","session_youtube_ff_link":"https://youtu.be/6Nh--7IK6fw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/55Jz0Cdvl1k&t=0h13m26s","sessions":["Visualization Design Methods"],"time_stamp":"2024-10-17T16:12:00Z","title":"Unveiling How Examples Shape Data Visualization Design Outcomes","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1613","abstract":"We present a path-based design model and system for designing and creating visualisations. Our model represents a systematic approach to constructing visual representations of data or concepts following a predefined sequence of steps. 
The initial step involves outlining the overall appearance of the visualisation by creating a skeleton structure, referred to as a flowpath. Subsequently, we specify objects, visual marks, properties, and appearance, storing them in a gene. Lastly, we map data onto the flowpath, ensuring suitable morphisms. Alternative designs are created by exchanging values in the gene. For example, designs that share similar traits, are created by making small incremental changes to the gene. Our design methodology fosters the generation of diverse creative concepts, space-filling visualisations, and traditional formats like bar charts, circular plots and pie charts. Through our implementation we showcase the model in action. As an example application, we integrate the output visualisations onto a smartwatch and visualisation dashboards. In this article we (1) introduce, define and explain the path model and discuss possibilities for its use, (2) present our implementation, results, and evaluation, and (3) demonstrate and evaluate an application of its use on a mobile watch.","accessible_pdf":true,"authors":[{"affiliations":["ExaDev, Gaerwen, United Kingdom","Bangor University, Bangor, United Kingdom"],"email":"james.ogge@gmail.com","is_corresponding":false,"name":"James R Jackson"},{"affiliations":["Bangor University, Bangor, United Kingdom"],"email":"p.ritsos@bangor.ac.uk","is_corresponding":false,"name":"Panagiotis D. Ritsos"},{"affiliations":["Bangor University, Bangor, United Kingdom"],"email":"p.butcher@bangor.ac.uk","is_corresponding":false,"name":"Peter W. S. Butcher"},{"affiliations":["Bangor University, Bangor, United Kingdom"],"email":"j.c.roberts@bangor.ac.uk","is_corresponding":true,"name":"Jonathan C Roberts"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1613","image_caption":"We present a path-based design model and system for designing and creating visualisations. The image shows the Genii visualisation designer tool which demonstrates our flowpath model. Individuals define their own path or choose predefined flowpaths (left panel), drag and drop the visualisation properties into the gene panel (middle), which are rendered onto the gallery (right). Users can either create a new gene which adds a new image to the gallery or edit parameters (through drag and drop) to adapt current visualisations. Crafted visualisations can be exported and used in other applications. 
","keywords":["Path-based design, Visualisation Design, Alternative Visualisations"],"open_access_supplemental_link":"https://jamesjacko.github.io/genii/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.03681","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1613/v-full-1613_Preview.mp4?token=Uhv0T5i2byoQIGD44Vxa9Jcd8h91FAHp0C8U8pLpvB4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1613/v-full-1613_Preview.srt?token=uwWiwrUqb19_M2oz4wx8ypLVaNjCs-_QJ7yLjzZdpkI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full29","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Design Methods","session_uid":"v-full","session_youtube_ff_id":"4GP7AtRD2y4","session_youtube_ff_link":"https://youtu.be/4GP7AtRD2y4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/55Jz0Cdvl1k&t=0h40m35s","sessions":["Visualization Design Methods"],"time_stamp":"2024-10-17T16:36:00Z","title":"Path-based Design Model for Constructing and Exploring Alternative Visualisations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1726","abstract":"User experience in data visualization is typically assessed through post-viewing self-reports, but these overlook the dynamic cognitive processes during interaction. This study explores the use of mind wandering- a phenomenon where attention spontaneously shifts from a primary task to internal, task-related thoughts or unrelated distractions- as a dynamic measure during visualization exploration. Participants reported mind wandering while viewing visualizations from a pre-labeled visualization database and then provided quantitative ratings of trust, engagement, and design quality, along with qualitative descriptions and short-term/long-term recall assessments. Results show that mind wandering negatively affects short-term visualization recall and various post-viewing measures, particularly for visualizations with little text annotation. Further, the type of mind wandering impacts engagement and emotional response. Mind wandering also functions as an intermediate process linking visualization design elements topost-viewing measures, influencing how viewers engage with and interpret visual information over time. Overall, this research underscores the importance of incorporating mind wandering as a dynamic measure in visualization design and evaluation, offering novel avenues for enhancing user engagement and comprehension.","accessible_pdf":true,"authors":[{"affiliations":["Arizona State University, Tempe, United States"],"email":"aarunku5@asu.edu","is_corresponding":true,"name":"Anjana Arunkumar"},{"affiliations":["Northeastern University, Boston, United States"],"email":"l.padilla@northeastern.edu","is_corresponding":false,"name":"Lace M. 
Padilla"},{"affiliations":["Arizona State University, Tempe, United States"],"email":"cbryan16@asu.edu","is_corresponding":false,"name":"Chris Bryan"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1726","image_caption":"While consuming data visualizations, the mind may wander, exploring diverse ideas, questions, and connections. Viewers may venture opinions on appearance and convention, report visual patterns and trends, integrate external knowledge, or engage in unrelated thoughts. Where does your mind wander and why does it matter?","keywords":["Visualization, Mind Wandering, Cognition, Engagement, Recall"],"open_access_supplemental_link":"https://osf.io/h5awt/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.03576","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1726/v-full-1726_Preview.mp4?token=qLuJG8ZPdGGFSTym-fu41zzoGoT8LM1yYZ6-jU99qiA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1726/v-full-1726_Preview.srt?token=yZe83VVDNgBVAT_iHg_uFKWEfNw8cviPFjJviitcPms&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full29","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Design Methods","session_uid":"v-full","session_youtube_ff_id":"WuNz1VKzPLY","session_youtube_ff_link":"https://youtu.be/WuNz1VKzPLY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/55Jz0Cdvl1k&t=0h52m2s","sessions":["Visualization Design Methods"],"time_stamp":"2024-10-17T16:48:00Z","title":"Mind Drifts, Data Shifts: Utilizing Mind Wandering to Track the Evolution of User Experience with Data Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1277","abstract":"This paper presents a novel end-to-end framework for closed-form computation and visualization of critical point uncertainty in 2D uncertain scalar fields. Critical points are fundamental topological descriptors used in the visualization and analysis of scalar fields. The uncertainty inherent in data (e.g., observational and experimental data, approximations in simulations, and compression), however, creates uncertainty regarding critical point positions. Uncertainty in critical point positions, therefore, cannot be ignored, given their impact on downstream data analysis tasks. In this work, we study uncertainty in critical points as a function of uncertainty in data modeled with probability distributions. Although Monte Carlo (MC) sampling techniques have been used in prior studies to quantify critical point uncertainty, they are often expensive and are infrequently used in production-quality visualization software. We, therefore, propose a new end-to-end framework to address these challenges that comprises a threefold contribution. First, we derive the critical point uncertainty in closed form, which is more accurate and efficient than the conventional MC sampling methods. Specifically, we provide the closed-form and semianalytical (a mix of closed-form and MC methods) solutions for parametric (e.g., uniform, Epanechnikov) and nonparametric models (e.g., histograms) with finite support. 
Second, we accelerate critical point probability computations using a parallel implementation with the VTK-m library, which is platform portable. Finally, we demonstrate the integration of our implementation with the ParaView software system to demonstrate near-real-time results for real datasets.","accessible_pdf":false,"authors":[{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"tushar.athawale@gmail.com","is_corresponding":true,"name":"Tushar M. Athawale"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"wangz@ornl.gov","is_corresponding":false,"name":"Zhe Wang"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"pugmire@ornl.gov","is_corresponding":false,"name":"David Pugmire"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"kmorel@acm.org","is_corresponding":false,"name":"Kenneth Moreland"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"gongq@ornl.gov","is_corresponding":false,"name":"Qian Gong"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"klasky@ornl.gov","is_corresponding":false,"name":"Scott Klasky"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"crj@sci.utah.edu","is_corresponding":false,"name":"Chris R. Johnson"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"paul.rosen@utah.edu","is_corresponding":false,"name":"Paul Rosen"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1277","image_caption":"Critical point visualization for the climate dataset. (a) Critical points of the original data are visualized with blue spheres. (b) Noise in the data creates new critical points for which no uncertainty is visualized. (c) Critical point uncertainty is computed and visualized through elevation proportional to critical point probability. 
Our closed-form solutions implemented with the VTK-m library provide a 1646x speed-up compared to the conventional approach.","keywords":["Topology, uncertainty, critical points, probabilistic analysis"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.18015v1","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1277/v-full-1277_Preview.mp4?token=FXTjHbPuB0h34s6PnN3eMIszxbIPGY02V9mpQFmdpQM&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1277/v-full-1277_Preview.srt?token=XVFHuLXTdhSfIDqag5JM-FL8pQ-q7ZcB-hfwVDjcHpw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Flow, Topology, and Uncertainty","session_uid":"v-full","session_youtube_ff_id":"kaB1IpYiCCU","session_youtube_ff_link":"https://youtu.be/kaB1IpYiCCU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/j1W-7oN5gGk&t=0h34m51s","sessions":["Flow, Topology, and Uncertainty"],"time_stamp":"2024-10-18T13:06:00Z","title":"Uncertainty Visualization of Critical Points of 2D Scalar Fields for Parametric and Nonparametric Probabilistic Models","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1461","abstract":"This paper presents a practical approach for the optimization of topological simplification, a central pre-processing step for the analysis and visualization of scalar data. Given an input scalar field f and a set of \u201csignal\u201d persistence pairs to maintain, our approaches produces an output field g that is close to f and which optimizes (i) the cancellation of \u201cnon-signal\u201d pairs, while (ii) preserving the \u201csignal\u201d pairs. In contrast to pre-existing simplification approaches, our method is not restricted to persistence pairs involving extrema and can thus address a larger class of topological features, in particular saddle pairs in three-dimensional scalar data. Our approach leverages recent generic persistence optimization frameworks and extends them with tailored accelerations specific to the problem of topological simplification. Extensive experiments report substantial accelerations over these frameworks, thereby making topological simplification optimization practical for real-life datasets. Our work enables a direct visualization and analysis of the topologically simplified data, e.g., via isosurfaces of simplified topology (fewer components and handles). We apply our approach to the extraction of prominent filament structures in three-dimensional data. Specifically, we show that our pre-simplification of the data leads to practical improvements over standard topological techniques for removing filament loops. We also show how our framework can be used to repair genus defects in surface processing. 
Finally, we provide a C++ implementation for reproducibility purposes.","accessible_pdf":false,"authors":[{"affiliations":["CNRS, Paris, France","SORBONNE UNIVERSITE, Paris, France"],"email":"mohamed.kissi@lip6.fr","is_corresponding":true,"name":"Mohamed KISSI"},{"affiliations":["CNRS, Paris, France","Sorbonne Universit\u00e9, Paris, France"],"email":"mathieu.pont@lip6.fr","is_corresponding":false,"name":"Mathieu Pont"},{"affiliations":["University of Arizona, Tucson, United States"],"email":"josh@cs.arizona.edu","is_corresponding":false,"name":"Joshua A Levine"},{"affiliations":["CNRS, Paris, France","Sorbonne Universit\u00e9, Paris, France"],"email":"julien.tierny@sorbonne-universite.fr","is_corresponding":false,"name":"Julien Tierny"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1461","image_caption":"Topological simplification of a dark matter density field in a cosmology dataset. The cosmic web geometry is depicted by an isosurface at isovalue 0.4, with core filament structures extracted via upward discrete integral lines from 2-saddles above 0.4. Our approach reduced the number of undesired topological features by 92%, leading to a less cluttered visualization. This simplifies the topology, removing noisy components and small-scale handles, as shown in the inset zooms. This also results in fewer skips in persistent saddle connector reversals, revealing the primary filament structure more clearly.","keywords":["Topological Data Analysis, scalar data, simplification, feature extraction."],"open_access_supplemental_link":"https://github.com/MohamedKISSI/Code-Paper-A-Pratical-Solver-for-Scalar-Data-Topological-Simplification","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"http://arxiv.org/abs/2407.12399","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1461/v-full-1461_Preview.mp4?token=8hD_BYjbZdeNNM6x4XqOfZp21w509Z-xWOKfOVEKzT0&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Flow, Topology, and Uncertainty","session_uid":"v-full","session_youtube_ff_id":"PJ_tDek0d88","session_youtube_ff_link":"https://youtu.be/PJ_tDek0d88","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/j1W-7oN5gGk&t=0h25m4s","sessions":["Flow, Topology, and Uncertainty"],"time_stamp":"2024-10-18T12:54:00Z","title":"A Practical Solver for Scalar Data Topological Simplification","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1494","abstract":"Topological abstractions offer a method to summarize the behavior of vector fields, but computing them robustly can be challenging due to numerical precision issues. One alternative is to represent the vector field using a discrete approach, which constructs a collection of pairs of simplices in the input mesh that satisfies criteria introduced by Forman\u2019s discrete Morse theory. While numerous approaches exist to compute pairs in the restricted case of the gradient of a scalar field, state-of-the-art algorithms for the general case of vector fields require expensive optimization procedures. 
This paper introduces a fast, novel approach for pairing simplices of two-dimensional, triangulated vector fields that do not vary in time. The key insight of our approach is that we can employ a local evaluation, inspired by the approach used to construct a discrete gradient field, where every simplex in a mesh is considered by no more than one of its vertices. Specifically, we observe that for any edge in the input mesh, we can uniquely assign an outward direction of flow. We can further expand this consistent notion of outward flow at each vertex, which corresponds to the concept of a downhill flow in the case of scalar fields. Working with outward flow enables a linear-time algorithm that processes the (outward) neighborhoods of each vertex one-by-one, similar to the approach used for scalar fields. We couple our approach to constructing discrete vector fields with a method to extract, simplify, and visualize topological features. Empirical results on analytic and simulation data demonstrate drastic improvements in running time, produce features similar to the current state-of-the-art, and show the application of simplification to large, complex flows","accessible_pdf":false,"authors":[{"affiliations":["University of Arizona, Tucson, United States"],"email":"finkent@arizona.edu","is_corresponding":true,"name":"Tanner Finken"},{"affiliations":["Sorbonne Universit\u00e9, Paris, France"],"email":"julien.tierny@sorbonne-universite.fr","is_corresponding":false,"name":"Julien Tierny"},{"affiliations":["University of Arizona, Tucson, United States"],"email":"josh@cs.arizona.edu","is_corresponding":false,"name":"Joshua A Levine"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1494","image_caption":"We extract and simplify a vector field of ocean currents using our technique. The input mesh has over 48 million simplices, and the original flow results in over 65000 critical points. We simplify to approximately 2000 critical points using a discrete representation of the field. 
Computing the original field for a domain this big takes only 4 minutes and computing complete simplification takes approximately 10 minutes.","keywords":["Flow visualization, discrete Morse theory, topological data analysis"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"http://arxiv.org/abs/2408.04769","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1494/v-full-1494_Preview.mp4?token=usx194xYVR8-djPko3ohF_0PYny_K5RbH2x0Fqx0ZFw&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Flow, Topology, and Uncertainty","session_uid":"v-full","session_youtube_ff_id":"OzB9wNzCmRc","session_youtube_ff_link":"https://youtu.be/OzB9wNzCmRc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/j1W-7oN5gGk&t=0h13m20s","sessions":["Flow, Topology, and Uncertainty"],"time_stamp":"2024-10-18T12:42:00Z","title":"Localized Evaluation for Constructing Discrete Vector Fields","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1574","abstract":"The numerical extraction of vortex cores from time-dependent fluid flow attracted much attention over the past decades. A commonly agreed upon vortex definition remained elusive since a proper vortex core needs to satisfy two hard constraints: it must be objective and Lagrangian. Recent methods on objectivization met the first but not the second constraint, since there was no formal guarantee that the resulting vortex coreline is indeed a pathline of the fluid flow. In this paper, we propose the first vortex core definition that is both objective and Lagrangian. Our approach restricts observer motions to follow along pathlines, which reduces the degrees of freedoms: we only need to optimize for an observer rotation that makes the observed flow as steady as possible. This optimization succeeds along Lagrangian vortex corelines and will result in a non-zero time-partial everywhere else. By performing this optimization at each point of a spatial grid, we obtain a residual scalar field, which we call vortex deviation error. The local minima on the grid serve as seed points for a gradient descent optimization that delivers sub-voxel accurate corelines. The visualization of both 2D and 3D vortex cores is based on the separation of the movement of the vortex core and the swirling flow behavior around it. While the vortex core is represented by a pathline, the swirling motion around it is visualized by streamlines in the correct frame. 
We demonstrate the utility of the approach on several 2D and 3D time-dependent vector fields.","accessible_pdf":false,"authors":[{"affiliations":["Friedrich-Alexander-University Erlangen-N\u00fcrnberg, Erlangen, Germany"],"email":"tobias.guenther@fau.de","is_corresponding":true,"name":"Tobias G\u00fcnther"},{"affiliations":["University of Magdeburg, Magdeburg, Germany"],"email":"theisel@ovgu.de","is_corresponding":false,"name":"Holger Theisel"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1574","image_caption":"In this paper, we present the first finite-time approach that extracts objective vortex corelines, which are guaranteed to be pathlines of the underlying flow. Our key idea is to restrict the motion of the observer to always follow along particle trajectories, which incidentally also reduces the degrees of freedom in the reference frame optimization. We derive the method for 2D and 3D time-dependent flow.","keywords":["Flow visualization, vortices, objective methods"],"open_access_supplemental_link":"https://doi.org/10.5281/zenodo.12750719","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1574/v-full-1574_Preview.mp4?token=1dzHZmqY0ScY43iHcq0r_TadN7TuEBg0fKQ4q1NmftE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1574/v-full-1574_Preview.srt?token=fVx8Bc3PS0li54gRmGAIQ0fnF-lBMxgiFhzZIe4XS2M&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Flow, Topology, and Uncertainty","session_uid":"v-full","session_youtube_ff_id":"uzDwMGgfoLE","session_youtube_ff_link":"https://youtu.be/uzDwMGgfoLE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/j1W-7oN5gGk&t=0h0m28s","sessions":["Flow, Topology, and Uncertainty"],"time_stamp":"2024-10-18T12:30:00Z","title":"Objective Lagrangian Vortex Cores and their Visual Representations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1163","abstract":"Integral curves have been widely used to represent and analyze various vector fields. In this paper, we propose a Curve Segment Neighborhood Graph (CSNG) to capture the relationships between neighboring curve segments. This graph representation enables us to adapt the fast community detection algorithm, i.e., the Louvain algorithm, to identify individual graph communities from CSNG. Our results show that these communities often correspond to the features of the flow. To achieve a multi-level interactive exploration of the detected communities, we adapt a force-directed layout that allows users to refine and re-group communities based on their domain knowledge. 
We incorporate the proposed techniques into an interactive system to enable effective analysis and interpretation of complex patterns in large-scale integral curve datasets.","accessible_pdf":false,"authors":[{"affiliations":["University of Houston, Houston, United States"],"email":"nguyenpkk95@gmail.com","is_corresponding":true,"name":"Nguyen K Phan"},{"affiliations":["University of Houston, Houston, United States"],"email":"chengu@cs.uh.edu","is_corresponding":false,"name":"Guoning Chen"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1163","image_caption":"A visualization of the (1) 3D streamlines dataset of the Solar Plume dataset on the left side, color-coded by their respective communities and (2) the community force-directed graph created using Louvain community detection at resolution = 0.7 on the right side.","keywords":["Vector field, neighbor search, community detection"],"open_access_supplemental_link":"https://github.com/MangoLion/CSN_VIS","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1163/v-short-1163_Preview.mp4?token=y8Q6ujlvBsJmxc_2_KRHXnVLngAMr1C9sm2ecSiBoS8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1163/v-short-1163_Preview.srt?token=DaGRuL20pAfTZbKmqrGbCQYb42f72kTxmCOSBU7UtZk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Flow, Topology, and Uncertainty","session_uid":"v-short","session_youtube_ff_id":"5HB_dbyxo_Q","session_youtube_ff_link":"https://youtu.be/5HB_dbyxo_Q","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/j1W-7oN5gGk&t=1h2m16s","sessions":["Flow, Topology, and Uncertainty"],"time_stamp":"2024-10-18T13:30:00Z","title":"Curve Segment Neighborhood-based Vector Field Exploration","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243350076","abstract":"Ensembles of contours arise in various applications like simulation, computer-aided design, and semantic segmentation. Uncovering ensemble patterns and analyzing individual members is a challenging task that suffers from clutter. Ensemble statistical summarization can alleviate this issue by permitting analyzing ensembles' distributional components like the mean and median, confidence intervals, and outliers. Contour boxplots, powered by Contour Band Depth (CBD), are a popular non-parametric ensemble summarization method that benefits from CBD's generality, robustness, and theoretical properties. In this work, we introduce Inclusion Depth (ID), a new notion of contour depth with three defining characteristics. First, ID is a generalization of functional Half-Region Depth, which offers several theoretical guarantees. Second, ID relies on a simple principle: the inside/outside relationships between contours. This facilitates implementing ID and understanding its results. Third, the computational complexity of ID scales quadratically in the number of members of the ensemble, improving CBD's cubic complexity. 
This also in practice speeds up the computation enabling the use of ID for exploring large contour ensembles or in contexts requiring multiple depth evaluations like clustering. In a series of experiments on synthetic data and case studies with meteorological and segmentation data, we evaluate ID's performance and demonstrate its capabilities for the visual analysis of contour ensembles.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Nicolas F. Chaves-de-Plaza"},{"affiliations":"","email":"","is_corresponding":false,"name":"Prerak Mody"},{"affiliations":"","email":"","is_corresponding":false,"name":"Marius Staring"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ren\u00e9 van Egmond"},{"affiliations":"","email":"","is_corresponding":false,"name":"Anna Vilanova"},{"affiliations":"","email":"","is_corresponding":false,"name":"Klaus Hildebrandt"}],"award":"","doi":"10.1109/TVCG.2024.3350076","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243350076","image_caption":"Inclusion Depth is a new contour depth notion that uses inside/outside relationships between contours to compute their depth significantly faster than existing methods like Contour Band Depth. Use the QR code to explore the Contour Depth Python library!","keywords":["Uncertainty visualization, contours, ensemble summarization, depth statistics."],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243350076/v-tvcg-20243350076_Preview.mp4?token=Bwl1l3rFMHAWUZgDBi1MFY8QhCk4fngkcd9WIpAIqBo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243350076/v-tvcg-20243350076_Preview.srt?token=3vy829ZDAcuAPjymyRN_V43xHj56G3yrezptt-DHLmU&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Flow, Topology, and Uncertainty","session_uid":"v-tvcg","session_youtube_ff_id":"IkbcvwKb3Ic","session_youtube_ff_link":"https://youtu.be/IkbcvwKb3Ic","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/j1W-7oN5gGk&t=0h49m11s","sessions":["Flow, Topology, and Uncertainty"],"time_stamp":"2024-10-18T13:18:00Z","title":"Inclusion Depth for Contour Ensembles","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1155","abstract":"Interactive visualizations are powerful tools for Exploratory Data Analysis (EDA), but how do they affect the observations analysts make about their data? We conducted a qualitative experiment with 13 professional data scientists analyzing two datasets with Jupyter notebooks, collecting a rich dataset of interaction traces and think-aloud utterances. By qualitatively coding participant utterances, we introduce a formalism that describes EDA as a sequence of analysis states, where each state is comprised of either a representation an analyst constructs (e.g., the output of a data frame, an interactive visualization, etc.) or an observation the analyst makes (e.g., about missing data, the relationship between variables, etc.). 
By applying our formalism to our dataset, we identify that interactive visualizations, on average, lead to earlier and more complex insights about relationships between dataset attributes compared to static visualizations. Moreover, by calculating metrics such as revisit count and representational diversity, we uncover that some representations serve more as \"planning aids\" during EDA rather than tools strictly for hypothesis-answering. We show how these measures help identify other patterns of analysis behavior, such as the \"80-20 rule\", where a small subset of representations drove the majority of observations. Based on these findings, we offer design guidelines for interactive exploratory analysis tooling and reflect on future directions for studying the role that visualizations play in EDA. ","accessible_pdf":true,"authors":[{"affiliations":["MIT, Cambridge, United States"],"email":"dwootton@mit.edu","is_corresponding":true,"name":"Dylan Wootton"},{"affiliations":["MIT, Cambridge, United States"],"email":"amyraefoxphd@gmail.com","is_corresponding":false,"name":"Amy Rae Fox"},{"affiliations":["University of Colorado Boulder, Boulder, United States"],"email":"evan.peck@colorado.edu","is_corresponding":false,"name":"Evan Peck"},{"affiliations":["MIT, Cambridge, United States"],"email":"arvindsatya@mit.edu","is_corresponding":false,"name":"Arvind Satyanarayan"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1155","image_caption":"A diagram illustrating a mixed-methods study of Exploratory Data Analysis (EDA) practices. The left section shows 13 data scientists conducting two EDAs, first with static charts, then with static and interactive charts. Think-aloud utterances and interaction traces are collected from these sessions. The middle section depicts how this data is processed: utterances are coded via content analysis to create observations, which are combined with interaction data to form a comprehensive dataset of EDA sessions. EDA metrics such as revisit rate and hover time are computed from this dataset. The right section demonstrates a formal description of EDA sessions, showing examples of how participants' actions and observations are encoded, including creating visualizations, commenting on distributions, and identifying relationships using various chart types. 
This systematic approach combines qualitative data collection with quantitative analysis to provide insights into EDA behaviors and strategies.","keywords":["Interaction Design, Methodologies, HumanQual, HumanQuant."],"open_access_supplemental_link":"https://osf.io/bu7je/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1155/v-full-1155_Preview.srt?token=BZU6Pn7B-4AbZfqQ-GNhRkm97YTfS1r2nPHEQrnbDhg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full30","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Scripts, Notebooks, and Provenance","session_uid":"v-full","session_youtube_ff_id":"CNQni-VZ4FI","session_youtube_ff_link":"https://youtu.be/CNQni-VZ4FI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HC69aABUJuc&t=0h0m30s","sessions":["Scripts, Notebooks, and Provenance"],"time_stamp":"2024-10-16T17:45:00Z","title":"Charting EDA: How Visualizations and Interactions Shape Analysis in Computational Notebooks.","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1204","abstract":"We present ProvenanceWidgets, a Javascript library of UI control elements such as radio buttons, checkboxes, and dropdowns to track and dynamically overlay a user's analytic provenance. These in situ overlays not only save screen space but also minimize the amount of time and effort needed to access the same information from elsewhere in the UI. In this paper, we discuss how we design modular UI control elements to track how often and how recently a user interacts with them and design visual overlays showing an aggregated summary as well as a detailed temporal history. We demonstrate the capability of ProvenanceWidgets by recreating three prior widget libraries: (1) Scented Widgets, (2) Phosphor objects, and (3) Dynamic Query Widgets. We also evaluated its expressiveness and conducted case studies with visualization developers to evaluate its effectiveness. We find that ProvenanceWidgets enables developers to implement custom provenance-tracking applications effectively. 
ProvenanceWidgets is available as open-source software at https://github.com/ProvenanceWidgets to help application developers build custom provenance-based systems.","accessible_pdf":true,"authors":[{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"arpitnarechania@gatech.edu","is_corresponding":true,"name":"Arpit Narechania"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"kaustubhodak1@gmail.com","is_corresponding":false,"name":"Kaustubh Odak"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"melassady@ai.ethz.ch","is_corresponding":false,"name":"Mennatallah El-Assady"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"endert@gatech.edu","is_corresponding":false,"name":"Alex Endert"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1204","image_caption":"ProvenanceWidgets is a new open-source JavaScript library of UI controls such as range sliders and dropdowns to track and dynamically overlay analytic provenance. Install it as \"npm install provenance-widgets\".","keywords":["Provenance, Analytic provenance, Visualization, UI controls, GUI elements, JavaScript library."],"open_access_supplemental_link":"https://github.com/ProvenanceWidgets/Supplemental-Material","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"http://arxiv.org/abs/2407.17431","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1204/v-full-1204_Preview.mp4?token=NcgZCl_jM23zSL86QflPmp1ABUfon9PW1Ai_9rwx-P0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1204/v-full-1204_Preview.srt?token=u4yM076oyfVu5TTGXjNWLCvxEI_pJKmW42rfn9048AU&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full30","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Scripts, Notebooks, and Provenance","session_uid":"v-full","session_youtube_ff_id":"Ed1cZDTTFd0","session_youtube_ff_link":"https://youtu.be/Ed1cZDTTFd0","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HC69aABUJuc&t=0h58m10s","sessions":["Scripts, Notebooks, and Provenance"],"time_stamp":"2024-10-16T18:45:00Z","title":"ProvenanceWidgets: A Library of UI Control Elements to Track and Dynamically Overlay Analytic Provenance","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1251","abstract":"Exploratory data science is an iterative process of obtaining, cleaning, profiling, analyzing, and interpreting data. This cyclical way of working creates challenges within the linear structure of computational notebooks, leading to issues with code quality, recall, and reproducibility. To remedy this, we present Loops, a set of visual support techniques for iterative and exploratory data analysis in computational notebooks. Loops leverages provenance information to visualize the impact of changes made within a notebook. In visualizations of the notebook provenance, we trace the evolution of the notebook over time and highlight differences between versions. 
Loops visualizes the provenance of code, markdown, tables, visualizations, and images and their respective differences. Analysts can explore these differences in detail in a separate view. Loops not only makes the analysis process transparent but also supports analysts in their data science work by showing the effects of changes and facilitating comparison of multiple versions. We demonstrate our approach's utility and potential impact in two use cases and feedback from notebook users from various backgrounds. This paper and all supplemental materials are available at https://osf.io/79eyn.","accessible_pdf":false,"authors":[{"affiliations":["Johannes Kepler University Linz, Linz, Austria"],"email":"klaus@eckelt.info","is_corresponding":true,"name":"Klaus Eckelt"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"kirangadhave2@gmail.com","is_corresponding":false,"name":"Kiran Gadhave"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"alex@sci.utah.edu","is_corresponding":false,"name":"Alexander Lex"},{"affiliations":["Johannes Kepler University Linz, Linz, Austria"],"email":"marc.streit@jku.at","is_corresponding":false,"name":"Marc Streit"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1251","image_caption":"Loops tracks and visualizes the provenance of computational notebooks. Compact and detailed visualizations of the notebook's history trace the evolution of the notebook over time and highlight differences between versions. Loops visualizes the provenance of code, markdown, tables, visualizations, and images and can explicitly encode their differences.","keywords":["Comparative visualization, computational notebooks, provenance, data science"],"open_access_supplemental_link":"https://osf.io/hxuak/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/79eyn","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1251/v-full-1251_Preview.mp4?token=hfiHy_SBFkr3Cp8zrhAzomuDkTB9gV-gF67EgA19M08&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full30","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Scripts, Notebooks, and Provenance","session_uid":"v-full","session_youtube_ff_id":"2l7HgOd2NIY","session_youtube_ff_link":"https://youtu.be/2l7HgOd2NIY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HC69aABUJuc&t=0h24m28s","sessions":["Scripts, Notebooks, and Provenance"],"time_stamp":"2024-10-16T18:09:00Z","title":"Loops: Leveraging Provenance and Visualization to Support Exploratory Data Analysis in Notebooks","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1730","abstract":"Understanding the input and output of data wrangling scripts is crucial for various tasks like debugging code and onboarding new data. However, existing research on script understanding primarily focuses on revealing the process of data transformations, lacking the ability to analyze the potential scope, i.e., the space of script inputs and outputs. 
Meanwhile, constructing input/output space during script analysis is challenging, as the wrangling scripts could be semantically complex and diverse, and the association between different data objects is intricate. To facilitate data workers in understanding the input and output space of wrangling scripts, we summarize ten types of constraints to express table space and build a mapping between data transformations and these constraints to guide the construction of the input/output for individual transformations. Then, we propose a constraint generation model for integrating table constraints across multiple transformations. Based on the model, we develop Ferry, an interactive system that extracts and visualizes the data constraints describing the input and output space of data wrangling scripts, thereby enabling users to grasp the high-level semantics of complex scripts and locate the origins of faulty data transformations. Besides, Ferry provides example input and output data to assist users in interpreting the extracted constraints and checking and resolving the conflicts between these constraints and any uploaded dataset. Ferry\u2019s effectiveness and usability are evaluated through two usage scenarios and two case studies, including understanding, debugging, and checking both single and multiple scripts, with and without executable data. Furthermore, an illustrative application is presented to demonstrate Ferry\u2019s flexibility.","accessible_pdf":false,"authors":[{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"rickyluozs@gmail.com","is_corresponding":true,"name":"Zhongsu Luo"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"kaixiong@zju.edu.cn","is_corresponding":false,"name":"Kai Xiong"},{"affiliations":["Zhejiang University, Hangzhou,Zhejiang, China"],"email":"3220105578@zju.edu.cn","is_corresponding":false,"name":"Jiajun Zhu"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"chenran928@zju.edu.cn","is_corresponding":false,"name":"Ran Chen"},{"affiliations":["Newcastle University, Newcastle Upon Tyne, United Kingdom"],"email":"xinhuan.shu@gmail.com","is_corresponding":false,"name":"Xinhuan Shu"},{"affiliations":["Zhejiang University, Ningbo, China"],"email":"dweng@zju.edu.cn","is_corresponding":false,"name":"Di Weng"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"ycwu@zju.edu.cn","is_corresponding":false,"name":"Yingcai Wu"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1730","image_caption":"The user interface of Ferry. Ferry is an interactive system that uses a constraint-based approach to help data workers understand the input/output space of data wrangling scripts. It aids in comprehending this space through constraint icon and constraint tag, combined with sample data. 
Additionally, Ferry detects conflicts between requirements and scripts, facilitating efficient scripts reuse and debugging.","keywords":["Data wrangling, Visual analytics, Constraints, Program understanding"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1730/v-full-1730_Preview.mp4?token=Op3_qrWX97c6NMeKmmpmOyBBKivPzJrkuUPEMAy2z2M&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1730/v-full-1730_Preview.srt?token=WwUEr4W9Iji8zAoBGgwCuRJr23SJXZt2SGzLiIiVuPY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full30","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Scripts, Notebooks, and Provenance","session_uid":"v-full","session_youtube_ff_id":"C0yhkKGlj7k","session_youtube_ff_link":"https://youtu.be/C0yhkKGlj7k","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HC69aABUJuc&t=0h12m55s","sessions":["Scripts, Notebooks, and Provenance"],"time_stamp":"2024-10-16T17:57:00Z","title":"Ferry: Toward Better Understanding of Input/Output Space for Data Wrangling Scripts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1830","abstract":"Over the past decade, several urban visual analytics systems and tools have been proposed to tackle a host of challenges faced by cities, in areas as diverse as transportation, weather, and real estate. Many of these tools have been designed through collaborations with urban experts, aiming to distill intricate urban analysis workflows into interactive visualizations and interfaces. However, the design, implementation, and practical use of these tools still rely on siloed approaches, resulting in bespoke applications that are difficult to reproduce and extend. At the design level, these tools undervalue rich data workflows from urban experts, typically treating them only as data providers and evaluators. At the implementation level, they lack interoperability with other technical frameworks. At the practical use level, they tend to be narrowly focused on specific fields, inadvertently creating barriers to cross-domain collaboration. To address these gaps, we present Curio, a framework for collaborative urban visual analytics. Curio uses a dataflow model with multiple abstraction levels (code, grammar, GUI elements) to facilitate collaboration across the design and implementation of visual analytics components. The framework allows experts to intertwine data preprocessing, management, and visualization stages while tracking the provenance of code and visualizations. In collaboration with urban experts, we evaluate Curio through a diverse set of usage scenarios targeting urban accessibility, urban microclimate, and sunlight access. These scenarios use different types of data and domain methodologies to illustrate Curio's flexibility in tackling pressing societal challenges. 
Curio is available at https://urbantk.org/curio.","accessible_pdf":false,"authors":[{"affiliations":["University of Illinois at Chicago, Chicago, United States"],"email":"gmorei3@uic.edu","is_corresponding":true,"name":"Gustavo Moreira"},{"affiliations":["University of California, Berkeley, Berkeley, United States","Massachusetts Institute of Technology , Somerville, United States"],"email":"maryamh@mit.edu","is_corresponding":false,"name":"Maryam Hosseini"},{"affiliations":["University of Illinois Urbana-Champaign, Urbana-Champaign, United States"],"email":"carolvfs@illinois.edu","is_corresponding":false,"name":"Carolina Veiga"},{"affiliations":["Universidade Federal Fluminense, Niteroi, Brazil"],"email":"lucasalexandre.s.cc@gmail.com","is_corresponding":false,"name":"Lucas Alexandre"},{"affiliations":["Politecnico di Milano, Milano, Italy"],"email":"nicola.colaninno@polimi.it","is_corresponding":false,"name":"Nicola Colaninno"},{"affiliations":["Universidade Federal Fluminense, Niter\u00f3i, Brazil"],"email":"danielcmo@ic.uff.br","is_corresponding":false,"name":"Daniel de Oliveira"},{"affiliations":["Universidade Federal de Pernambuco, Recife, Brazil"],"email":"nivan@cin.ufpe.br","is_corresponding":false,"name":"Nivan Ferreira"},{"affiliations":["Universidade Federal Fluminense , Niteroi, Brazil"],"email":"mlage@ic.uff.br","is_corresponding":false,"name":"Marcos Lage"},{"affiliations":["University of Illinois Chicago, Chicago, United States"],"email":"fabiom@uic.edu","is_corresponding":false,"name":"Fabio Miranda"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1830","image_caption":"The rise of urban data has led experts to address societal challenges using data-driven methods. Yet, effective analysis requires diverse resources and complex workflows. Current tools like urban visual analytics applications and computational notebooks often fall short. To address these challenges, we propose Curio, a provenance-aware collaborative framework for urban visual analytics. Curio allows users to build and iterate on dataflows with reusable modules, supporting collaborative design and tracking of changes. 
We evaluated Curio with domain experts through a set of case studies focusing on urban accessibility, climate, and sunlight access.","keywords":["Urban analytics, urban data, spatial data, dataflow, provenance, visualization framework, visualization system"],"open_access_supplemental_link":"https://urbantk.org/curio","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.06139","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1830/v-full-1830_Preview.mp4?token=PMw01OvmxRM3Dmn59e5pWrPncdeYM4gUN3dS9qUjibM&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1830/v-full-1830_Preview.srt?token=xH83hAsr19T2OlPPJKG6USmJSlzqV9swdUgxjw-rkPY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full30","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Scripts, Notebooks, and Provenance","session_uid":"v-full","session_youtube_ff_id":"phFXjrH7_ns","session_youtube_ff_link":"https://youtu.be/phFXjrH7_ns","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HC69aABUJuc&t=0h47m45s","sessions":["Scripts, Notebooks, and Provenance"],"time_stamp":"2024-10-16T18:33:00Z","title":"Curio: A Dataflow-Based Framework for Collaborative Urban Visual Analytics","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243354561","abstract":"Interactive visualization can support fluid exploration but is often limited to predetermined tasks. Scripting can support a vast range of queries but may be more cumbersome for free-form exploration. Embedding interactive visualization in scripting environments, such as computational notebooks, provides an opportunity to leverage the strengths of both direct manipulation and scripting. We investigate interactive visualization design methodology, choices, and strategies under this paradigm through a design study of calling context trees used in performance analysis, a field which exemplifies typical exploratory data analysis workflows with big data and hard to define problems. We first produce a formal task analysis assigning tasks to graphical or scripting contexts based on their specificity, frequency, and suitability. We then design a notebook-embedded interactive visualization and validate it with intended users. In a follow-up study, we present participants with multiple graphical and scripting interaction modes to elicit feedback about notebook-embedded visualization design, finding consensus in support of the interaction model. 
We report and reflect on observations regarding the process and design implications for combining visualization and scripting in notebooks.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Connor Scully-Allison"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ian Lumsden"},{"affiliations":"","email":"","is_corresponding":false,"name":"Katy Williams"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jesse Bartels"},{"affiliations":"","email":"","is_corresponding":false,"name":"Michela Taufer"},{"affiliations":"","email":"","is_corresponding":false,"name":"Stephanie Brink"},{"affiliations":"","email":"","is_corresponding":false,"name":"Abhinav Bhatele"},{"affiliations":"","email":"","is_corresponding":false,"name":"Olga Pearce"},{"affiliations":"","email":"","is_corresponding":false,"name":"Katherine E. Isaacs"}],"award":"","doi":"10.1109/TVCG.2024.3354561","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243354561","image_caption":"Our model for assigning tasks to interactive visualization or scripting modalities when designing notebook embedded visualizations. Task frequency and specificity inform preferred modalities. Highly specific tasks, such as complex queries with precise numbers, can be assigned to scripting, as it offers expressivity and efficiency to a scripting-familiar audience over complex visual interfaces. Less-specific, more frequent tasks like finding anomalies can be assigned to visualization, as it supports multiple forms of recognition and browsing. We note many tasks can be supported by both, with a hand-off as the analysis grows from more exploratory to more concrete.","keywords":["Exploratory Data Analysis, Interactive Data Analysis, Computational Notebooks, Hybrid Visualization-Scripting, Visualization Design"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243354561/v-tvcg-20243354561_Preview.mp4?token=9Ez-32rWEPjm7Uv5glU87qDJ3Y9tQUzKfRHrq9Xynvc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243354561/v-tvcg-20243354561_Preview.srt?token=H5SXYSVTxWe2GR7CEQohT2N1uMy5Hcdk_oVGrP_u0Gg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full30","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Scripts, Notebooks, and Provenance","session_uid":"v-tvcg","session_youtube_ff_id":"67Um_JEdwEk","session_youtube_ff_link":"https://youtu.be/67Um_JEdwEk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HC69aABUJuc&t=0h37m20s","sessions":["Scripts, Notebooks, and Provenance"],"time_stamp":"2024-10-16T18:21:00Z","title":"Design Concerns for Integrated Scripting and Interactive Visualization in Notebook Environments","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1218","abstract":"Placing text labels is a common way to explain key elements in a given scene. Given a graphic input and original label information, how to place labels to meet both geometric and aesthetic requirements is an open challenging problem. 
Geometry-wise, traditional rule-driven solutions struggle to capture the complex interactions between labels, let alone consider graphical/appearance content. In terms of aesthetics, training/evaluation data ideally require nontrivial effort and expertise in design, thus resulting in a lack of decent datasets for learning-based methods. To address the above challenges, we formulate the task with a graph representation, where nodes correspond to labels and edges to interactions between labels, and treat label placement as a node position prediction problem. With this novel representation, we design a Label Placement Graph Transformer (LPGT) to predict label positions. Specifically, edge-level attention, conditioned on node representations, is introduced to reveal potential relationships between labels. To integrate graphic/image information, we design a feature aligning strategy that extracts deep features for nodes and edges efficiently. Next, to address the dataset issue, we collect commercial illustrations with professionally designed label layouts from household appliance manuals, and annotate them with useful information to create a novel dataset named the Appliance Manual Illustration Labels (AMIL) dataset. In the thorough evaluation on AMIL, our LPGT solution achieves promising label placement performance compared with popular baselines. Our algorithm is available at https://github.com/JingweiQu/LPGT.","accessible_pdf":false,"authors":[{"affiliations":["Southwest University, Beibei, China"],"email":"qujingwei@swu.edu.cn","is_corresponding":true,"name":"Jingwei Qu"},{"affiliations":["Southwest University, Chongqing, China"],"email":"z2211973606@email.swu.edu.cn","is_corresponding":false,"name":"Pingshun Zhang"},{"affiliations":["Southwest University, Beibei, China"],"email":"enyuche@gmail.com","is_corresponding":false,"name":"Enyu Che"},{"affiliations":["COLLEGE OF COMPUTER AND INFORMATION SCIENCE, SOUTHWEST UNIVERSITY SCHOOL OF SOFTWAREC, Chongqin, China"],"email":"out1147205215@outlook.com","is_corresponding":false,"name":"Yinan Chen"},{"affiliations":["Stony Brook University, New York, United States"],"email":"hling@cs.stonybrook.edu","is_corresponding":false,"name":"Haibin Ling"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1218","image_caption":"GNN-driven label placement. For a set of labels to be placed in a graphic, Label Placement Graph Transformer (LPGT) predicts the label layout given the graphic and raw label information. First, a complete graph is constructed to capture the relationship between labels. Its node and edge features are generated from the label information and image features. Next, given the graph as input, LPGT iteratively learns the displacements of the nodes by a sequence of GNN modules. 
The graph is updated by each module and taken as input for the next module.","keywords":["Label placement, Graph neural network, Transformer"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1218/v-full-1218_Preview.mp4?token=V_BqaYvYHAVkbDe2N4Q-tw9rdfS_5RQGXLTQGgkspUk&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full31","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visual Design: Sketching and Labeling","session_uid":"v-full","session_youtube_ff_id":"CrX4jHVmDfU","session_youtube_ff_link":"https://youtu.be/CrX4jHVmDfU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/FLsXwoR_H8E&t=0h48m12s","sessions":["Visual Design: Sketching and Labeling"],"time_stamp":"2024-10-17T15:03:00Z","title":"Graph Transformer for Label Placement","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1394","abstract":"This paper presents discursive patinas, a technique to visualize discussions onto data visualizations, inspired by how people leave traces in the physical world. While data visualizations are widely discussed in online communities and social media, comments tend to be displayed separately from the visualization and we lack ways to relate these discussions back to the content of the visualization, e.g., to situate comments, explain visual patterns, or question assumptions. In our visualization annotation interface, users can designate areas within the visualization. Discursive patinas are made of overlaid visual marks (anchors), attached to textual comments with category labels, likes, and replies. By coloring and styling the anchors, a meta visualization emerges, showing what and where people comment and annotate the visualization. These patinas show regions of heavy discussions, recent commenting activity, and the distribution of questions, suggestions, or personal stories. We ran workshops with 90 students, domain experts, and visualization researchers to study how people use anchors to discuss visualizations and how patinas influence people's understanding of the discussion. Our results show that discursive patinas improve the ability to navigate discussions and guide people to comments that help understand, contextualize, or scrutinize the visualization. 
We discuss the potential of anchors and patinas to support discursive engagements, including critical readings of visualizations, design feedback, and feminist approaches to data visualization.","accessible_pdf":true,"authors":[{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom","Potsdam University of Applied Sciences, Potsdam, Germany"],"email":"tobias.kauer@fh-potsdam.de","is_corresponding":true,"name":"Tobias Kauer"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"derya.akbaba@liu.se","is_corresponding":false,"name":"Derya Akbaba"},{"affiliations":["University of Applied Sciences Potsdam, Potsdam, Germany"],"email":"doerk@fh-potsdam.de","is_corresponding":false,"name":"Marian D\u00f6rk"},{"affiliations":["Inria, Bordeaux, France","University of Edinburgh, Edinburgh, United Kingdom"],"email":"bbach@inf.ed.ac.uk","is_corresponding":false,"name":"Benjamin Bach"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1394","image_caption":"Discursive Patinas present a new technique that visualizes discussions about visualizations, inspired by traces left in the physical world","keywords":["Data Visualization, Discussion, Annotation"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.17994","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1394/v-full-1394_Preview.mp4?token=CBABLCrD8Q-YwP3M7gFn4tKU58vJoBc0VguQ7DYvcRM&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1394/v-full-1394_Preview.srt?token=Aie2P6umyj0_cHVPgHwnqva_Uf-UOJIhrGytP-lXn_o&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full31","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visual Design: Sketching and Labeling","session_uid":"v-full","session_youtube_ff_id":"zBwtliqYULc","session_youtube_ff_link":"https://youtu.be/zBwtliqYULc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/FLsXwoR_H8E&t=0h0m48s","sessions":["Visual Design: Sketching and Labeling"],"time_stamp":"2024-10-17T14:15:00Z","title":"Discursive Patinas: Anchoring Discussions in Data Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1502","abstract":"Sketching is a common practice among visualization designers, and an approachable entry to visualizations for individuals, but moving from a sketch to a full-fledged data visualization often requires throwing away the original sketch and recreating it from scratch. We aim to instead formalize these sketches, enabling them to support iteration and systematic data mapping through a visual-first templating workflow. In this workflow, authors sketch a representative visualization and structure it into an expressive template for an envisioned or partial dataset, capturing implicit style as well as explicit data mappings. In order to demonstrate and evaluate our proposed workflow, we implement DataGarden, and evaluate it through a reproduction and a freeform study. 
We discuss how DataGarden supports personal expression, and delve into the variety of visualizations that authors can produce with it, identifying cases which demonstrate the limitations of our approach and discuss avenues for future work.","accessible_pdf":false,"authors":[{"affiliations":["Universit\u00e9 Paris-Saclay, Orsay, France"],"email":"anna.offenwanger@gmail.com","is_corresponding":true,"name":"Anna Offenwanger"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Inria, LISN, Orsay, France"],"email":"theophanis.tsandilas@inria.fr","is_corresponding":false,"name":"Theophanis Tsandilas"},{"affiliations":["University of Toronto, Toronto, Canada"],"email":"fanny@dgp.toronto.edu","is_corresponding":false,"name":"Fanny Chevalier"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1502","image_caption":"DataGarden supports sketching personal, expressive designs and formalizing these as structured visualization templates. To express (A) a visualization design idea, a user sketches a few representative glyphs in (B) the canvas, making their vision explicit. DataGarden provides the means to structure the freeform sketch into a visualization template by (C) capturing implicit style and explicit data mappings via user interaction and machine support.","keywords":["Personal Visualization, Visualization template, Sketch input, Sketch-based visualization, Visualization by-example"],"open_access_supplemental_link":"https://datagarden-git.github.io/datagarden/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://hal.science/hal-04664470/","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1502/v-full-1502_Preview.mp4?token=w4CD-laAbkqYIpzNABukxJTmJ15CNB136u4pl5pvCeg&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1502/v-full-1502_Preview.srt?token=JB23hpFMZEeWtz5LvaymlOgJV0mIBGDhUhcpOdvAw60&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full31","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visual Design: Sketching and Labeling","session_uid":"v-full","session_youtube_ff_id":"IFG97n_gi0g","session_youtube_ff_link":"https://youtu.be/IFG97n_gi0g","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/FLsXwoR_H8E&t=1h0m54s","sessions":["Visual Design: Sketching and Labeling"],"time_stamp":"2024-10-17T15:15:00Z","title":"DataGarden: Formalizing Personal Sketches into Structured Visualization Templates","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233345340","abstract":"Label quality issues, such as noisy labels and imbalanced class distributions, have negative effects on model performance. Automatic reweighting methods identify problematic samples with label quality issues by recognizing their negative effects on validation samples and assigning lower weights to them. However, these methods fail to achieve satisfactory performance when the validation samples are of low quality. To tackle this, we develop Reweighter, a visual analysis tool for sample reweighting. The reweighting relationships between validation samples and training samples are modeled as a bipartite graph. 
Based on this graph, a validation sample improvement method is developed to improve the quality of validation samples. Since the automatic improvement may not always be perfect, a co-cluster-based bipartite graph visualization is developed to illustrate the reweighting relationships and support the interactive adjustments to validation samples and reweighting results. The adjustments are converted into the constraints of the validation sample improvement method to further improve validation samples. We demonstrate the effectiveness of Reweighter in improving reweighting results through quantitative evaluation and two case studies.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Weikai Yang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yukai Guo"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jing Wu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Zheng Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Lan-Zhe Guo"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yu-Feng Li"},{"affiliations":"","email":"","is_corresponding":false,"name":"Shixia Liu"}],"award":"","doi":"10.1109/TVCG.2023.3345340","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233345340","image_caption":"Reweighter: (a) The reweighting relationships between 3 (out of 14) validation sample clusters and 6 (out of 35) training sample clusters. V1 and V2 contain low-quality validation samples, resulting in many inconsistent training samples in S1 and S2. (b) After correcting the noisy labels of low-quality validation samples, increasing the weights of high-quality validation samples, and verifying inconsistent training samples, the reweighting results are improved (S''1 and S'2).","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2312.05067","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233345340/v-tvcg-20233345340_Preview.mp4?token=VcRo6hbin8uXz7KmnTsmVvNa8mU_S8VAGYC-BJIeQhc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233345340/v-tvcg-20233345340_Preview.srt?token=xjZQ2cID1hWuPw7zn9Kzqgwk49mOw6WoCKYnM3tYuPY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full31","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visual Design: Sketching and Labeling","session_uid":"v-tvcg","session_youtube_ff_id":"bW_5eDLbNng","session_youtube_ff_link":"https://youtu.be/bW_5eDLbNng","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/FLsXwoR_H8E&t=0h36m58s","sessions":["Visual Design: Sketching and Labeling"],"time_stamp":"2024-10-17T14:51:00Z","title":"Interactive Reweighting for Mitigating Label Quality Issues","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243392476","abstract":"Areas of interest (AOIs) are well-established means of providing semantic information for visualizing, analyzing, and classifying gaze data. However, the usual manual annotation of AOIs is time consuming and further impaired by ambiguities in label assignments. 
To address these issues, we present an interactive labeling approach that combines visualization, machine learning, and user-centered explainable annotation. Our system provides uncertainty-aware visualization to build trust in classification with an increasing number of annotated examples. It combines specifically designed EyeFlower glyphs, dimensionality reduction, and selection and exploration techniques in an integrated workflow. The approach is versatile and hardware-agnostic, supporting video stimuli from stationary and unconstrained mobile eye tracking alike. We conducted an expert review to assess labeling strategies and trust building.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Maurice Koch"},{"affiliations":"","email":"","is_corresponding":false,"name":"Nan Cao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Daniel Weiskopf"},{"affiliations":"","email":"","is_corresponding":false,"name":"Kuno Kurzhals"}],"award":"","doi":"10.1109/TVCG.2024.3392476","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243392476","image_caption":"Uncertainty-aware visualization approach for interactive labeling of eye-tracking videos that combines specifically designed glyphs, dimensionality reduction, and exploration techniques in an integrated workflow.","keywords":["Visual analytics, eye tracking, uncertainty, active learning, trust building"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243392476/v-tvcg-20243392476_Preview.mp4?token=gnYzxQO__CeGs4Cun8TMWoG6ni5U1YzoBSZnt1Eia3I&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243392476/v-tvcg-20243392476_Preview.srt?token=nFyCu1x6MiFVIVsdLvxqWN9xHWDFQU7yBBPcpxucbos&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full31","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visual Design: Sketching and Labeling","session_uid":"v-tvcg","session_youtube_ff_id":"QQcYetRH7uw","session_youtube_ff_link":"https://youtu.be/QQcYetRH7uw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/FLsXwoR_H8E&t=0h12m48s","sessions":["Visual Design: Sketching and Labeling"],"time_stamp":"2024-10-17T14:27:00Z","title":"Active Gaze Labeling: Visualization for Trust Building","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243402610","abstract":"Point clouds are widely used as a versatile representation of 3D entities and scenes for all scale domains and in a variety of application areas, serving as a fundamental data category to directly convey spatial features. However, due to point sparsity, lack of structure, irregular distribution, and acquisition-related inaccuracies, results of point cloud visualization are often subject to visual complexity and ambiguity. In this regard, non-photorealistic rendering can improve visual communication by reducing the cognitive effort required to understand an image or scene and by directing attention to important features. 
In the last 20 years, this has been demonstrated by various non-photorealistic rendering approaches that were proposed to target point clouds specifically. However, they do not use a common language or structure for assessment, which complicates comparison and selection. Further, recent developments regarding point cloud characteristics and processing, such as massive data size or web-based rendering, are rarely considered. To address these issues, we present a survey on non-photorealistic rendering approaches for point cloud visualization, providing an overview of the current state of research. We derive a structure for the assessment of approaches, proposing seven primary dimensions for the categorization regarding intended goals, data requirements, used techniques, and mode of operation. We then systematically assess corresponding approaches and utilize this classification to identify trends and research gaps, motivating future research in the development of effective non-photorealistic point cloud rendering methods.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Ole Wegen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Willy Scheibel"},{"affiliations":"","email":"","is_corresponding":false,"name":"Matthias Trapp"},{"affiliations":"","email":"","is_corresponding":false,"name":"Rico Richter"},{"affiliations":"","email":"","is_corresponding":false,"name":"J\u00fcrgen D\u00f6llner"}],"award":"","doi":"10.1109/TVCG.2024.3402610","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243402610","image_caption":"Non-photorealistic rendering (NPR) can improve visual communication by reducing the cognitive effort required to understand an image and by directing attention to important features. Over the past two decades, several NPR approaches have been developed, specifically targeting point clouds (1). To evaluate these methods, we use seven dimensions derived from the design process for point cloud NPR approaches (2). 
The systematic assessment of the corresponding approaches (3) allows us to identify trends and research gaps.","keywords":["Point clouds, survey, non-photorealistic rendering"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243402610/v-tvcg-20243402610_Preview.mp4?token=tXNQN9yjdAbyE28srWhHmmcERkq0tv8cnRvStp7S6fo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243402610/v-tvcg-20243402610_Preview.srt?token=H6gSriwCgV9HBrta1RH3a67XaSIrXcm7yFaM3WxEoaM&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full31","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visual Design: Sketching and Labeling","session_uid":"v-tvcg","session_youtube_ff_id":"H6-xLO6_IzM","session_youtube_ff_link":"https://youtu.be/H6-xLO6_IzM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/FLsXwoR_H8E&t=0h22m40s","sessions":["Visual Design: Sketching and Labeling"],"time_stamp":"2024-10-17T14:39:00Z","title":"A Survey on Non-photorealistic Rendering Approaches for Point Cloud Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1833","abstract":"The concept of an intelligent augmented reality (AR) assistant has significant, wide-ranging applications, with potential uses in medicine, military, and mechanics domains. Such an assistant must be able to perceive the environment and actions, reason about the environment state in relation to a given task, and seamlessly interact with the task performer. These interactions typically involve an AR headset equipped with sensors which capture video, audio, and haptic feedback. Previous works have sought to facilitate the development of intelligent AR assistants by visualizing these sensor data streams in conjunction with the assistant's perception and reasoning model outputs. However, existing visual analytics systems do not focus on user modeling or include biometric data, and are only capable of visualizing a single task session for a single performer at a time. Moreover, they typically assume a task involves linear progression from one step to the next. We propose a visual analytics system that allows users to compare performance during multiple task sessions, focusing on non-linear tasks where different step sequences can lead to success. In particular, we design visualizations for understanding user behavior through functional near-infrared spectroscopy (fNIRS) data as a proxy for perception, attention, and memory as well as corresponding motion data (acceleration, angular velocity, and gaze). We distill these insights into embedding representations that allow users to easily select groups of sessions with similar behaviors. We provide two case studies that demonstrate how to use these visualizations to gain insights about task performance using data collected during helicopter copilot training tasks. 
Finally, we evaluate our approach by conducting an in-depth examination of a think-aloud experiment with five domain experts.","accessible_pdf":false,"authors":[{"affiliations":["New York University, New York, United States"],"email":"s.castelo@nyu.edu","is_corresponding":true,"name":"Sonia Castelo Quispe"},{"affiliations":["New York University, New York, United States"],"email":"jlrulff@gmail.com","is_corresponding":false,"name":"Jo\u00e3o Rulff"},{"affiliations":["New York University, Brooklyn, United States"],"email":"pss442@nyu.edu","is_corresponding":false,"name":"Parikshit Solunke"},{"affiliations":["New York University, New York, United States"],"email":"erin.mcgowan@nyu.edu","is_corresponding":false,"name":"Erin McGowan"},{"affiliations":["New York University, New York CIty, United States"],"email":"guandewu@nyu.edu","is_corresponding":false,"name":"Guande Wu"},{"affiliations":["New York University, Brooklyn, United States"],"email":"iran@ccrma.stanford.edu","is_corresponding":false,"name":"Iran Roman"},{"affiliations":["New York University, New York, United States"],"email":"rlopez@nyu.edu","is_corresponding":false,"name":"Roque Lopez"},{"affiliations":["New York University, Brooklyn, United States"],"email":"bs3639@nyu.edu","is_corresponding":false,"name":"Bea Steers"},{"affiliations":["New York University, New York, United States"],"email":"qisun@nyu.edu","is_corresponding":false,"name":"Qi Sun"},{"affiliations":["New York University, New York, United States"],"email":"jpbello@nyu.edu","is_corresponding":false,"name":"Juan Pablo Bello"},{"affiliations":["Northrop Grumman Mission Systems, Redondo Beach, United States"],"email":"bradley.feest@ngc.com","is_corresponding":false,"name":"Bradley S Feest"},{"affiliations":["Northrop Grumman, Aurora, United States"],"email":"michael.middleton@ngc.com","is_corresponding":false,"name":"Michael Middleton"},{"affiliations":["Northrop Grumman, Falls Church, United States"],"email":"ryan.mckendrick@ngc.com","is_corresponding":false,"name":"Ryan McKendrick"},{"affiliations":["New York University, New York City, United States"],"email":"csilva@nyu.edu","is_corresponding":false,"name":"Claudio Silva"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1833","image_caption":"HuBar is a visual analytics system designed to analyze performer behavior in AR-assisted tasks, enabling multi-perspective analysis of multimodal time-series data. 
It provides a hierarchical set of visualizations: the Scatter Plot View (A) identifies clusters and patterns, the Workload Aggregation View (B) summarizes cognitive workloads and errors, the Event Timeline View (C) aligns time series collected during sessions, enabling comparison across sessions and exploration to update linked views, the Summary Matrix View (D) analyzes procedure frequency and errors, and the Detail View (E) enables in-depth session exploration with synchronized video and time series visualizations.","keywords":["Perception & Cognition, Application Motivated Visualization, Temporal Data, Image and Video Data, Mobile, AR/VR/Immersive, Specialized Input/Display Hardware."],"open_access_supplemental_link":"https://github.com/VIDA-NYU/HuBar","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/pdf/2407.12260v1","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1833/v-full-1833_Preview.mp4?token=kXpVEp2G8JSoWr3JmAkTz5HHLuEVtLEGtn6CkLaAN8k&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1833/v-full-1833_Preview.srt?token=TJChJ8-kK0-qBRk7QY9CzGYXXCNAFxVIEHdA94znYnw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full4","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"The Toolboxes of Visualization","session_uid":"v-full","session_youtube_ff_id":"AaX3LMAAkL4","session_youtube_ff_link":"https://youtu.be/AaX3LMAAkL4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/QoKwx8sUMyg&t=0h23m16s","sessions":["The Toolboxes of Visualization"],"time_stamp":"2024-10-17T16:24:00Z","title":"HuBar: A Visual Analytics Tool to Explore Human Behaviour based on fNIRS in AR guidance systems","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1189","abstract":"The information visualization research community commonly produces supporting software to demonstrate technical contributions to the field. However, developing this software tends to be an overwhelming task. The final product tends to be a research prototype without much thought for modularization and re-usability, which makes it harder to replicate and adopt. This paper presents a design pattern for facilitating the creation, dissemination, and re-utilization of visualization techniques using reactive widgets. The design pattern features basic concepts that leverage modern front-end development best practices and standards, which facilitate development and replication. The paper presents several usage examples of the pattern, templates for implementation, and even a wrapper for facilitating the conversion of any Vega [27,28] specification into a reactive widget.","accessible_pdf":true,"authors":[{"affiliations":["Northeastern University, San Francisco, United States"],"email":"jguerra@northeastern.edu","is_corresponding":true,"name":"John Alexis Guerra-Gomez"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1189","image_caption":"IEEE VIS 2024 Paper embedding explorer main interface, showing a main scatterplot of each one of the papers of the conference distributed using UMAP dimensionality reduction. 
The scatterplot has been brushed for selecting papers, which are highlighted at the bottom of the page, showing the thumbnail image, title, and abstract. At the top, some controls allow for the selection of the dimensionality reduction method and some hyperparameters.","keywords":["Information Visualization, Software Components, Reactive Components, Notebook Programming, Direct Manipulation, Brush and Linking"],"open_access_supplemental_link":"https://observablehq.com/@john-guerra/reactive-widgets","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1189/v-short-1189_Preview.mp4?token=rPe1SPGjCEZc8bReZDXHzfLMpdURwWQ8JW0xz4oXo94&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1189/v-short-1189_Preview.srt?token=37NNBJWgqLS1dSoUD3CeZ8ifLVuy098GbLy_KBSmUFY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full4","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"The Toolboxes of Visualization","session_uid":"v-short","session_youtube_ff_id":"gPxR7ibKKvY","session_youtube_ff_link":"https://youtu.be/gPxR7ibKKvY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/QoKwx8sUMyg&t=1h0m6s","sessions":["The Toolboxes of Visualization"],"time_stamp":"2024-10-17T17:00:00Z","title":"Towards Reusable and Reactive Widgets for Information Visualization Research and Dissemination","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233261320","abstract":"In recent years, narrative visualization has gained much attention. Researchers have proposed different design spaces for various narrative visualization genres and scenarios to facilitate the creation process. As users' needs grow and automation technologies advance, increasingly more tools have been designed and developed. In this study, we summarized six genres of narrative visualization (annotated charts, infographics, timelines & storylines, data comics, scrollytelling & slideshow, and data videos) based on previous research and four types of tools (design spaces, authoring tools, ML/AI-supported tools and ML/AI-generator tools) based on the intelligence and automation level of the tools. We surveyed 105 papers and tools to study how automation can progressively engage in visualization design and narrative processes to help users easily create narrative visualizations. This research aims to provide an overview of current research and development in the automation involvement of narrative visualization tools. 
We discuss key research problems in each category and suggest new opportunities to encourage further research in the related domain.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Qing Chen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Shixiong Cao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jiazhe Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Nan Cao"}],"award":"","doi":"10.1109/TVCG.2023.3261320","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233261320","image_caption":"Number of relevant research publications or tools in different genres for narrative visualization in chronological order. This matrix visualizes the distribution of research publications and tools across six narrative visualization genres: Annotated Chart, Infographic, Timeline & Storyline, Data Comics, Scrollytelling & Slideshow, and Data Video, from before 2010 through 2022. Each colored circle represents a type of tool: Design Space (red), Authoring Tool (orange), ML/AI-supported Tool (green), or ML/AI-generator Tool (purple). The numbers represent the total count of publications or tools per genre per year, providing insights into the evolution and focus of research in narrative visualization over time.","keywords":["Data Visualization, Automatic Visualization, Narrative Visualization, Design Space, Authoring Tools, Survey"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2206.12118","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233261320/v-tvcg-20233261320_Preview.mp4?token=tPrsCIJecGNwXtkTPTVmcb1BRVVVRCOx89fOlRxbpgw&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233261320/v-tvcg-20233261320_Preview.srt?token=h7SGnaiJL8ly5r7DJuX2R33E8PRYUlNmo_OZAJ9wHd4&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full4","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"The Toolboxes of Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"xPShwRDa_9U","session_youtube_ff_link":"https://youtu.be/xPShwRDa_9U","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/QoKwx8sUMyg&t=0h35m40s","sessions":["The Toolboxes of Visualization"],"time_stamp":"2024-10-17T16:36:00Z","title":"How Does Automation Shape the Process of Narrative Visualization: A Survey of Tools","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233346641","abstract":"Currently, growing data sources and long-running algorithms impede user attention and interaction with visual analytics applications. Progressive visualization (PV) and visual analytics (PVA) alleviate this problem by allowing immediate feedback and interaction with large datasets and complex computations, avoiding waiting for complete results by using partial results improving with time. Yet, creating a progressive visualization requires more effort than a regular visualization but also opens up new possibilities, such as steering the computations towards more relevant parts of the data, thus saving computational resources. 
However, there is currently no comprehensive overview of the design space for progressive visualization systems. We surveyed the related work of PV and derived a new taxonomy for progressive visualizations by systematically categorizing all PV publications that included visualizations with progressive features. Progressive visualizations can be categorized by well-known visualization taxonomies, but we also found that progressive visualizations can be distinguished by the way they manage their data processing, data domain, and visual update. Furthermore, we identified key properties such as uncertainty, steering, visual stability, and real-time processing that are significantly different with progressive applications. We also collected evaluation methodologies reported by the publications and conclude with statistical findings, research gaps, and open challenges. A continuously updated visual browser of the survey data is available at visualsurvey.net/pva.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Alex Ulmer"},{"affiliations":"","email":"","is_corresponding":false,"name":"Marco Angelini"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jean-Daniel Fekete"},{"affiliations":"","email":"","is_corresponding":false,"name":"J\u00f6rn Kohlhammerm"},{"affiliations":"","email":"","is_corresponding":false,"name":"Thorsten May"}],"award":"","doi":"10.1109/TVCG.2023.3346641","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233346641","image_caption":"Our new taxonomy for progressive visualisations. The categories of visualisation are based on previous taxonomies proposed by Shneiderman, Keim and Munzner. The categories of progressive processing represent an extension of the characterisation proposed by Angelini et al., with the addition of a new variant, termed 'custom chunking'. The categories of data domain address the implications of differing visualisation designs in the context of known and unknown data or process endpoints. 
The fourth category is visual update pattern, which indicates the manner in which visualisations are updated in response to the generation of new partial results.","keywords":["Data visualization, Convergence, Visual analytics, Taxonomy Surveys, Rendering (computer graphics), Task analysis, Progressive Visual Analytics, Progressive Visualization, Taxonomy, State-of-the-Art Report, Survey"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233346641/v-tvcg-20233346641_Preview.mp4?token=-2u0ZjP8hKuE44Jh4xYL2aNynKfTkVJhCVBS1JjCWng&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233346641/v-tvcg-20233346641_Preview.srt?token=jl-uuHvQ3_CFNepSIiaxzD7GooOQhHVcC-8A7FXfBRc&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full4","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"The Toolboxes of Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"XXxozI-bcog","session_youtube_ff_link":"https://youtu.be/XXxozI-bcog","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/QoKwx8sUMyg&t=0h48m8s","sessions":["The Toolboxes of Visualization"],"time_stamp":"2024-10-17T16:48:00Z","title":"A Survey on Progressive Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243390219","abstract":"This system paper documents the technical foundations for the extension of the Topology ToolKit (TTK) to distributed-memory parallelism with the Message Passing Interface (MPI). While several recent papers introduced topology-based approaches for distributed-memory environments, these were reporting experiments obtained with tailored, mono-algorithm implementations. In contrast, we describe in this paper a versatile approach (supporting both triangulated domains and regular grids) for the support of topological analysis pipelines, i.e. a sequence of topological algorithms interacting together, possibly on distinct numbers of processes. While developing this extension, we faced several algorithmic and software engineering challenges, which we document in this paper. We describe an MPI extension of TTK\u2019s data structure for triangulation representation and traversal, a central component to the global performance and generality of TTK\u2019s topological implementations. We also introduce an intermediate interface between TTK and MPI, both at the global pipeline level, and at the fine-grain algorithmic level. We provide a taxonomy for the distributed-memory topological algorithms supported by TTK, depending on their communication needs and provide examples of hybrid MPI+thread parallelizations. Detailed performance analyses show that parallel efficiencies range from 20% to 80% (depending on the algorithms), and that the MPI-specific preconditioning introduced by our framework induces a negligible computation time overhead. We illustrate the new distributed-memory capabilities of TTK with an example of advanced analysis pipeline, combining multiple algorithms, run on the largest publicly available dataset we have found (120 billion vertices) on a standard cluster with 64 nodes (for a total of 1536 cores). 
Finally, we provide a roadmap for the completion of TTK\u2019s MPI extension, along with generic recommendations for each algorithm communication category.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"E. Le Guillou"},{"affiliations":"","email":"","is_corresponding":false,"name":"M. Will"},{"affiliations":"","email":"","is_corresponding":false,"name":"P. Guillou"},{"affiliations":"","email":"","is_corresponding":false,"name":"J. Lukasczyk"},{"affiliations":"","email":"","is_corresponding":false,"name":"P. Fortin"},{"affiliations":"","email":"","is_corresponding":false,"name":"C. Garth"},{"affiliations":"","email":"","is_corresponding":false,"name":"J. Tierny"}],"award":"","doi":"10.1109/TVCG.2024.3390219","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243390219","image_caption":"Output of an integrated pipeline that produces a real-life use case combining all of the algorithms parallelized in our paper. The pipeline is executed on the Turbulent Channel Flow dataset (120 billion vertices), a three-dimensional regular grid with two scalar fields, the pressure of the fluid and its gradient magnitude. The spheres correspond to the pressure critical points and the tubes are the integral lines starting at saddle points. Figure (a) shows all of the produced geometry, while (b) and (c) show parts of the output zoomed in. ","keywords":["Topological data analysis, high-performance computing, distributed-memory algorithms."],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/pdf/2310.08339","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243390219/v-tvcg-20243390219_Preview.mp4?token=JZVVxFRRTH9J64rV7USkfm_tjj3QFugQtpJl9kFyeOU&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full4","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"The Toolboxes of Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"CZWLfhlBYiQ","session_youtube_ff_link":"https://youtu.be/CZWLfhlBYiQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/QoKwx8sUMyg&t=0h12m56s","sessions":["The Toolboxes of Visualization"],"time_stamp":"2024-10-17T16:12:00Z","title":"TTK is Getting MPI-Ready","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243406387","abstract":"The process of labeling medical text plays a crucial role in medical research. Nonetheless, creating accurately labeled medical texts of high quality is often a time-consuming task that requires specialized domain knowledge. Traditional methods for generating labeled data typically rely on rigid rule-based approaches, which may not adapt well to new tasks. While recent machine learning (ML) methodologies have mitigated the manual labeling efforts, configuring models to align with specific research requirements can be challenging for labelers without technical expertise. 
Moreover, automated labeling techniques, such as transfer learning, face difficulties in directly incorporating expert input, whereas semi-automated methods, like data programming, allow knowledge integration through rules or knowledge bases but may lack continuous result refinement throughout the entire labeling process. In this study, we present a collaborative human-ML teaming workflow that seamlessly integrates visual cluster analysis and active learning to assist domain experts in labeling medical text with high efficiency. Additionally, we introduce an innovative neural network model called the embedding network, which incorporates expert insights to generate task-specific embeddings for medical texts. We integrate the workflow and embedding network into a visual analytics tool named KMTLabeler, equipped with coordinated multi-level views and interactions. Two illustrative case studies, along with a controlled user study, provide substantial evidence of the effectiveness of KMTLabeler in creating an efficient labeling environment for medical text classification.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"He Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yang Ouyang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yuchen Wu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Chang Jiang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Lixia Jin"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yuanwu Cao"},{"affiliations":"","email":"","is_corresponding":true,"name":"Quan Li"}],"award":"","doi":"10.1109/TVCG.2024.3406387","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243406387","image_caption":" The KMTLabeler interface: The (A) Control Panel provides an overview of the dataset and enables filtering for labeling. The (B) Embedding Projection View allows users to compare and adjust projection structures for pattern exploration, while the (C) Weight Modification Panel and the (D) Rule Formulation Panel enable knowledge-based tuning of projection structures to align them with specific tasks. The (E) Cluster Comparison View facilitates detailed comparison of clusters for label creation, and the (F) Label Evaluation View evaluates clustering groups according to various metrics. 
The (G) Action Record View tracks actions during labeling, and (H) Active Learning Panel supports \"one-by-one\" labeling of suggested instances.","keywords":["Medical Text Labeling, Expert Knowledge, Embedding Network, Visual Cluster Analysis, Active Learning"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243406387/v-tvcg-20243406387_Preview.mp4?token=VnPRkdepeRVTnH4pmVCk7zBKKkpq42d2mzlaLMT86as&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243406387/v-tvcg-20243406387_Preview.srt?token=MPFzXxvpYtQoeH-IVkROFZyv0TberWC63x-c_lzzevI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full4","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"The Toolboxes of Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"s2lF1u4g7c4","session_youtube_ff_link":"https://youtu.be/s2lF1u4g7c4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/QoKwx8sUMyg&t=0h0m39s","sessions":["The Toolboxes of Visualization"],"time_stamp":"2024-10-17T16:00:00Z","title":"KMTLabeler: An Interactive Knowledge-Assisted Labeling Tool for Medical Text Classification","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1705","abstract":"Contour trees describe the topology of level sets in scalar fields and are widely used in topological data analysis and visualization. A main challenge of utilizing contour trees for large-scale scientific data is their computation at scale using high-performance computing. To address this challenge, recent work has introduced distributed hierarchical contour trees for distributed computation and storage of contour trees. However, effective use of these distributed structures in analysis and visualization requires subsequent computation of geometric properties and branch decomposition to support contour extraction and exploration. In this work, we introduce distributed algorithms for augmentation, hypersweeps, and branch decomposition that enable parallel computation of geometric properties, and support the use of distributed contour trees as query structures for scientific exploration. 
We evaluate the parallel performance of these algorithms and apply them to identify and extract important contours for scientific visualization.","accessible_pdf":false,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"mingzhefluorite@gmail.com","is_corresponding":true,"name":"Mingzhe Li"},{"affiliations":["University of Leeds, Leeds, United Kingdom"],"email":"h.carr@leeds.ac.uk","is_corresponding":false,"name":"Hamish Carr"},{"affiliations":["Lawrence Berkeley National Laboratory, Berkeley, United States"],"email":"oruebel@lbl.gov","is_corresponding":false,"name":"Oliver R\u00fcbel"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"wang.bei@gmail.com","is_corresponding":false,"name":"Bei Wang"},{"affiliations":["Lawrence Berkeley National Laboratory, Berkeley, United States"],"email":"ghweber@lbl.gov","is_corresponding":false,"name":"Gunther H Weber"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1705","image_caption":"Our method applied to a 3D WarpX laser-driven, plasma-based particle accelerator simulation dataset with a resolution of 6791x371x371. We use the x-component of the electric field. Left: three 2D slices of the volume along different axes with the extracted contours on the slice. Right: Using distributed topological data analysis to extract and visualize 3D isosurfaces corresponding to the top-11 branches of the contour tree.","keywords":["Contour trees, branch decomposition, parallel algorithms, computational topology, topological data analysis"],"open_access_supplemental_link":"https://gitlab.kitware.com/vtk/vtk-m","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1705/v-full-1705_Preview.mp4?token=KRb7CIhbrASvEyQDGsuCalh0XjYtWrTtPA72tjD062g&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Topological Data Analysis","session_uid":"v-full","session_youtube_ff_id":"_RvXzzJfjFA","session_youtube_ff_link":"https://youtu.be/_RvXzzJfjFA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Pd3W5-EJRVg&t=0h27m49s","sessions":["Topological Data Analysis"],"time_stamp":"2024-10-17T14:39:00Z","title":"Distributed Augmentation, Hypersweeps, and Branch Decomposition of Contour Trees for Scientific Exploration","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1793","abstract":"This research explores a novel paradigm for preserving topological segmentations in existing error-bounded lossy compressors. Today's lossy compressors rarely consider preserving topologies such as Morse-Smale complexes, and the discrepancies in topology between original and decompressed datasets could potentially result in erroneous interpretations or even incorrect scientific conclusions. In this paper, we focus on preserving Morse-Smale segmentations in 2D/3D piecewise linear scalar fields, targeting the precise reconstruction of minimum/maximum labels induced by the integral line of each vertex. The key is to derive a series of edits during compression time. 
These edits are applied to the decompressed data, leading to an accurate reconstruction of segmentations while keeping the error within the prescribed error bound. To this end, we develop a workflow to fix extrema and integral lines alternatively until convergence within finite iterations. We accelerate each workflow component with shared-memory/GPU parallelism to make the performance practical for coupling with compressors. We demonstrate use cases with fluid dynamics, ocean, and cosmology application datasets with a significant acceleration with an NVIDIA A100 GPU.","accessible_pdf":true,"authors":[{"affiliations":["The Ohio State University, Columbus, United States"],"email":"li.14025@osu.edu","is_corresponding":true,"name":"Yuxiao Li"},{"affiliations":["University of California, Riverside, Riverside, United States"],"email":"xlian007@ucr.edu","is_corresponding":false,"name":"Xin Liang"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"wang.bei@gmail.com","is_corresponding":false,"name":"Bei Wang"},{"affiliations":["The Ohio State University, Columbus, United States"],"email":"qiu.722@osu.edu","is_corresponding":false,"name":"Yongfeng Qiu"},{"affiliations":["Argonne National Laboratory, Lemont, United States"],"email":"lyan@anl.gov","is_corresponding":false,"name":"Lin Yan"},{"affiliations":["The Ohio State University, Columbus, United States"],"email":"guo.2154@osu.edu","is_corresponding":false,"name":"Hanqi Guo"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1793","image_caption":"This figure compares SZ3 and ours (SZ3) in terms of feature preservation capability for MSS in combustion data. False cases are highlighted with boxes.","keywords":["Lossy compression, feature-preserving compression, Morse-Smale segmentations, shared-memory parallelism."],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2406.09423","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1793/v-full-1793_Preview.mp4?token=mq_GwqVoaX8VM0EBDuKWSvU8zRZEYMCZpPRYlndDs5E&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1793/v-full-1793_Preview.srt?token=qZnV8L0OVhtE5M8C6zYi7I6Me-QIz-UonlBkWLAHe44&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Topological Data Analysis","session_uid":"v-full","session_youtube_ff_id":"TRMO8YUuSSs","session_youtube_ff_link":"https://youtu.be/TRMO8YUuSSs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Pd3W5-EJRVg&t=0h0m44s","sessions":["Topological Data Analysis"],"time_stamp":"2024-10-17T14:15:00Z","title":"MSz: An Efficient Parallel Algorithm for Correcting Morse-Smale Segmentations in Error-Bounded Lossy Compressors","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1803","abstract":"Scalar field comparison is a fundamental task in scientific visualization. 
In topological data analysis, we compare topological descriptors of scalar fields---such as persistence diagrams and merge trees---because they provide succinct and robust abstract representations. Several similarity measures for topological descriptors seem to be both asymptotically and practically efficient with polynomial time algorithms, but they do not scale well when handling large-scale, time-varying scientific data and ensembles. In this paper, we propose a new framework to facilitate the comparative analysis of merge trees, inspired by tools from locality sensitive hashing (LSH). LSH hashes similar objects into the same hash buckets with high probability. We propose two new similarity measures for merge trees that can be computed via LSH, using new extensions to Recursive MinHash and subpath signature, respectively. Our similarity measures are extremely efficient to compute and closely resemble the results of existing measures such as merge tree edit distance or geometric interleaving distance. Our experiments demonstrate the utility of our LSH framework in applications such as shape matching, clustering, key event detection, and ensemble summarization. ","accessible_pdf":false,"authors":[{"affiliations":["University of Utah, SALT LAKE CITY, United States"],"email":"lyuweiran@gmail.com","is_corresponding":true,"name":"Weiran Lyu"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"g.s.raghavendra@gmail.com","is_corresponding":false,"name":"Raghavendra Sridharamurthy"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"jeffp@cs.utah.edu","is_corresponding":false,"name":"Jeff M. Phillips"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"wang.bei@gmail.com","is_corresponding":false,"name":"Bei Wang"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1803","image_caption":"An overview of our pipeline is shown in the representative image. Given a set of scalar fields as input, we first simplify each scalar field using a small persistence threshold to remove noise from the data. We then compute the corresponding merge tree with labeling. These merge trees are subsequently used to generate signatures using either the RMH or subpath signature algorithms. Locality-sensitive hashing (LSH) is employed to divide the signatures into bands and rows. 
Finally, for empirical comparison, we generate distance matrices by collecting similar pairs from the LSH.","keywords":["Merge trees, locality sensitive hashing, comparative analysis, topological data analysis, scientific visualization"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1803/v-full-1803_Preview.mp4?token=bTKTc2VQvpQs8YUA6AesgADr3vNA7g6xS8QX_9lFKj4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1803/v-full-1803_Preview.srt?token=YJGJgzwZGd5-Z0ayemd9xf2RokZnwKSp5oU17mupfg0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Topological Data Analysis","session_uid":"v-full","session_youtube_ff_id":"77lGpXvrG0k","session_youtube_ff_link":"https://youtu.be/77lGpXvrG0k","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Pd3W5-EJRVg&t=0h14m46s","sessions":["Topological Data Analysis"],"time_stamp":"2024-10-17T14:27:00Z","title":"Fast Comparative Analysis of Merge Trees Using Locality-Sensitive Hashing","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1188","abstract":"Vortices and their analysis play a critical role in the understanding of complex phenomena in turbulent flows. Traditional vortex extraction methods, notably region-based techniques, often overlook the entanglement phenomenon, resulting in the inclusion of multiple vortices within a single extracted region. Their separation is necessary for quantifying different types of vortices and their statistics. In this study, we propose a novel vortex separation method that extends the conventional contour tree-based segmentation approach with an additional step termed \u201clayering\u201d. Upon extracting a vortical region using specified vortex criteria (e.g., \u03bb2), we initially establish topological segmentation based on the contour tree, followed by the layering process to allocate appropriate segmentation IDs to unsegmented cells, thus separating individual vortices within the region. However, these regions may still suffer from inaccurate splits, which we address statistically by leveraging the continuity of vorticity lines across the split boundaries. 
Our findings demonstrate a significant improvement in both the separation of vortices and the mitigation of inaccurate splits compared to prior methods.","accessible_pdf":false,"authors":[{"affiliations":["University of Houston, Houston, United States"],"email":"adeelz92@gmail.com","is_corresponding":true,"name":"Adeel Zafar"},{"affiliations":["University of Houston, Houston, United States"],"email":"zpoorsha@cougarnet.uh.edu","is_corresponding":false,"name":"Zahra Poorshayegh"},{"affiliations":["University of Houston, Houston, United States"],"email":"diyang@uh.edu","is_corresponding":false,"name":"Di Yang"},{"affiliations":["University of Houston, Houston, United States"],"email":"chengu@cs.uh.edu","is_corresponding":false,"name":"Guoning Chen"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1188","image_caption":"This figure illustrates the steps of the proposed topological separation method. (a) shows a vortical region extracted using a specific value of \u03bb2, along with the critical points of the minimal join tree. (b) displays the contour tree-based segmentation of the region using the extracted minimal join tree. (c) depicts the use of \u201clayering\u201d to assign appropriate segmentation IDs to the segment (red) associated with the maximum. (d) shows the region being separated into exactly two vortices (green and blue). (e) illustrates the process of ensuring the validity of the split by computing the vorticity lines in the vicinity of the split.","keywords":["Fluid flow, vortices, vortex topology"],"open_access_supplemental_link":"https://arxiv.org/src/2407.03384v1/anc/supp_doc.pdf","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2407.03384","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1188/v-short-1188_Preview.mp4?token=sgv_irFDfgo2UYspqvHcN4nevlgpkBM3_ckeCMVGAiU&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1188/v-short-1188_Preview.srt?token=SYeVB_zKh2OueId1Sa6sV8jHchqiMdi7Sb3LdtGw9Yk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Topological Data Analysis","session_uid":"v-short","session_youtube_ff_id":"fzAYRuAZbwA","session_youtube_ff_link":"https://youtu.be/fzAYRuAZbwA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Pd3W5-EJRVg&t=1h4m31s","sessions":["Topological Data Analysis"],"time_stamp":"2024-10-17T15:15:00Z","title":"Topological Separation of Vortices","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233330262","abstract":"This paper presents a computational framework for the concise encoding of an ensemble of persistence diagrams, in the form of weighted Wasserstein barycenters [100], [102] of a dictionary of atom diagrams. We introduce a multi-scale gradient descent approach for the efficient resolution of the corresponding minimization problem, which interleaves the optimization of the barycenter weights with the optimization of the atom diagrams. 
Our approach leverages the analytic expressions for the gradient of both sub-problems to ensure fast iterations and it additionally exploits shared-memory parallelism. Extensive experiments on public ensembles demonstrate the efficiency of our approach, with Wasserstein dictionary computations in the orders of minutes for the largest examples. We show the utility of our contributions in two applications. First, we apply Wasserstein dictionaries to data reduction and reliably compress persistence diagrams by concisely representing them with their weights in the dictionary. Second, we present a dimensionality reduction framework based on a Wasserstein dictionary defined with a small number of atoms (typically three) and encode the dictionary as a low dimensional simplex embedded in a visual space (typically in 2D). In both applications, quantitative experiments assess the relevance of our framework. Finally, we provide a C++ implementation that can be used to reproduce our results.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Keanu Sisouk"},{"affiliations":"","email":"","is_corresponding":false,"name":"Julie Delon"},{"affiliations":"","email":"","is_corresponding":false,"name":"Julien Tierny"}],"award":"","doi":"10.1109/TVCG.2023.3330262","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233330262","image_caption":"Visual comparison (left) between the input persistence diagrams for three members of an initial ensemble (one member per ground-truth cluster class). For each member, the sphere color encodes the correspondence between the input and the compressed diagrams. This visual comparison shows that the main features of the diagrams are well preserved by our reduction approach, for which a low relative reconstruction error can be observed. 
The planar overview of the ensemble (right) generated by our dimensionality reduction enables the visualization of the relations between the different diagrams of the ensemble.","keywords":["Topological data analysis, ensemble data, persistence diagrams"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2304.14852","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233330262/v-tvcg-20233330262_Preview.mp4?token=5OxVTy6viqjBpmHEOIm4vvAGI8HOoHWyPgysuZR-PWk&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Topological Data Analysis","session_uid":"v-tvcg","session_youtube_ff_id":"h_qmhmjYFFs","session_youtube_ff_link":"https://youtu.be/h_qmhmjYFFs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Pd3W5-EJRVg&t=0h40m6s","sessions":["Topological Data Analysis"],"time_stamp":"2024-10-17T14:51:00Z","title":"Wasserstein Dictionaries of Persistence Diagrams","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233334755","abstract":"This paper presents a computational framework for the Wasserstein auto-encoding of merge trees (MT-WAE), a novel extension of the classical auto-encoder neural network architecture to the Wasserstein metric space of merge trees. In contrast to traditional auto-encoders which operate on vectorized data, our formulation explicitly manipulates merge trees on their associated metric space at each layer of the network, resulting in superior accuracy and interpretability. Our novel neural network approach can be interpreted as a non-linear generalization of previous linear attempts [79] at merge tree encoding. It also trivially extends to persistence diagrams. Extensive experiments on public ensembles demonstrate the efficiency of our algorithms, with MT-WAE computations in the orders of minutes on average. We show the utility of our contributions in two applications adapted from previous work on merge tree encoding [79]. First, we apply MT-WAE to merge tree compression, by concisely representing them with their coordinates in the final layer of our auto-encoder. Second, we document an application to dimensionality reduction, by exploiting the latent space of our auto-encoder, for the visual analysis of ensemble data. We illustrate the versatility of our framework by introducing two penalty terms, to help preserve in the latent space both the Wasserstein distances between merge trees, as well as their clusters. In both applications, quantitative experiments assess the relevance of our framework. 
Finally, we provide a C++ implementation that can be used for reproducibility.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Mathieu Pont"},{"affiliations":"","email":"","is_corresponding":true,"name":"Julien Tierny"}],"award":"","doi":"10.1109/TVCG.2023.3334755","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233334755","image_caption":"Visual analysis of the Earthquake ensemble ((a) each ground-truth class is represented by one of its members), with our Wasserstein Auto-Encoder of Merge Trees (MT-WAE). We apply our contributions to merge tree compression ((b), right) by simply storing their coordinates in the last decoding layer of our network. We exploit the latent space of our network to generate 2D layouts of the ensemble (c). The reconstruction of user-defined locations ((c) and (d), purple) enables an interactive exploration of the latent space. MT-WAE also supports persistence correlation views (e), which reveal the persistent features which exhibit the most variability in the ensemble. ","keywords":["Topological data analysis, ensemble data, persistence diagrams, merge trees, auto-encoders, neural networks"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233334755/v-tvcg-20233334755_Preview.mp4?token=NCOxCxgJ-2fZg33e5WgixMfQD43MuXdvBkAm4BXwzrI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233334755/v-tvcg-20233334755_Preview.srt?token=SGT5s_ZQcXHR43diaNIsh61J_pojn88sDLy60lEsaso&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Topological Data Analysis","session_uid":"v-tvcg","session_youtube_ff_id":"jop4MUY9KDE","session_youtube_ff_link":"https://youtu.be/jop4MUY9KDE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Pd3W5-EJRVg&t=0h52m8s","sessions":["Topological Data Analysis"],"time_stamp":"2024-10-17T15:03:00Z","title":"Wasserstein Auto-Encoders of Merge Trees (and Persistence Diagrams)","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1039","abstract":"Propagation analysis refers to studying how information spreads on social media, a pivotal endeavor for understanding social sentiment and public opinions. Numerous studies contribute to visualizing information spread, but few have considered the implicit and complex diffusion patterns among multiple platforms. To bridge the gap, we summarize cross-platform diffusion patterns with experts and identify significant factors that dissect the mechanisms of cross-platform information spread. Based on that, we propose an information diffusion model that estimates the likelihood of a topic/post spreading among different social media platforms. Moreover, we propose a novel visual metaphor that encapsulates cross-platform propagation in a manner analogous to the spread of seeds across gardens. 
Specifically, we visualize platforms, posts, implicit cross-platform routes, and salient instances as elements of a virtual ecosystem \u2014 gardens, flowers, winds, and seeds, respectively. We further develop a visual analytic system, namely BloomWind, that enables users to quickly identify the cross-platform diffusion patterns and investigate the relevant social media posts. Ultimately, we demonstrate the usage of BloomWind through two case studies and validate its effectiveness using expert interviews.","accessible_pdf":false,"authors":[{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"940662579@qq.com","is_corresponding":true,"name":"Jianing Yin"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"hzjia@zju.edu.cn","is_corresponding":false,"name":"Hanze Jia"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"zhoubuwei@zju.edu.cn","is_corresponding":false,"name":"Buwei Zhou"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"tangtan@zju.edu.cn","is_corresponding":false,"name":"Tan Tang"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"yingluu@zju.edu.cn","is_corresponding":false,"name":"Lu Ying"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"sn_ye@zju.edu.cn","is_corresponding":false,"name":"Shuainan Ye"},{"affiliations":["Michigan State University, East Lansing, United States"],"email":"pengtaiq@msu.edu","is_corresponding":false,"name":"Tai-Quan Peng"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"ycwu@zju.edu.cn","is_corresponding":false,"name":"Yingcai Wu"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1039","image_caption":"Interface Overview of BloomWind (Cluster-level): (a) Cluster-level Propagation View, demonstrating the diffusion process of topics among platforms; (b) Timeline View, for selecting a time frame and controlling the animation process of propagation; (c) Cluster-level Detail View, listing the post and user information by topic and platform.","keywords":["Propagation analysis, social media visualization, cross-platform propagation, metaphor design"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1039/v-full-1039_Preview.mp4?token=RMStTCRgaLgf8wuswgS8B2S_P9JX_UtU18dAZAvK_6I&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1039/v-full-1039_Preview.srt?token=gd0jO6EgrCVsS41Ukgf-sCEr_i-Rbk7cSV5HtnvGPuw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full6","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"Motion and Animated Notions","session_uid":"v-full","session_youtube_ff_id":"orsYGZt1cWI","session_youtube_ff_link":"https://youtu.be/orsYGZt1cWI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/7Y2cPfXGiAY&t=0h54m24s","sessions":["Motion and Animated Notions"],"time_stamp":"2024-10-17T18:33:00Z","title":"Blowing Seeds Across Gardens: Visualizing Implicit Propagation of Cross-Platform Social Media 
Posts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1325","abstract":"Dynamic data visualizations can convey large amounts of information over time, such as using motion to depict changes in data values for multiple entities. Such dynamic displays put a demand on our visual processing capacities, yet our perception of motion is limited. Several techniques have been shown to improve the processing of dynamic displays. Staging the animation to sequentially show steps in a transition and tracing object movement by displaying trajectory histories can improve processing by reducing the cognitive load. In this paper, We examine the effectiveness of staging and tracing in dynamic displays. We showed participants animated line charts depicting the movements of lines and asked them to identify the line with the highest mean and variance. We manipulated the animation to display the lines with or without staging, tracing and history, and compared the results to a static chart as a control. Results showed that tracing and staging are preferred by participants, and improve their performance in mean and variance tasks respectively. They also preferred display time 3 times shorter when staging is used. Also, encoding animation speed with mean and variance in congruent tasks is associated with higher accuracy. These findings help inform real-world best practices for building dynamic displays. The supplementary materials can be found at https://osf.io/8c95v/","accessible_pdf":false,"authors":[{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"shu343@gatech.edu","is_corresponding":true,"name":"Songwen Hu"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"ouxunjiang@u.northwestern.edu","is_corresponding":false,"name":"Ouxun Jiang"},{"affiliations":["Dolby Laboratories Inc., San Francisco, United States"],"email":"jcr@dolby.com","is_corresponding":false,"name":"Jeffrey Riedmiller"},{"affiliations":["Georgia Tech, Atlanta, United States","University of Massachusetts Amherst, Amherst, United States"],"email":"cxiong@gatech.edu","is_corresponding":false,"name":"Cindy Xiong Bearfield"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1325","image_caption":"Examples of different animation design options. 
The animations are arranged in a time sequence from top to bottom and categorized into six conditions from left to right.","keywords":["Animation, Dynamic Displays, Perception, Motion, Analytic Tasks"],"open_access_supplemental_link":"https://osf.io/8c95v/","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1325/v-full-1325_Preview.mp4?token=znV_u6YW7G-s5ppYwiYo4wWNrKPrsIIJxfTiurV5PC8&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full6","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"Motion and Animated Notions","session_uid":"v-full","session_youtube_ff_id":"pY3yFbMe5RE","session_youtube_ff_link":"https://youtu.be/pY3yFbMe5RE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/7Y2cPfXGiAY&t=0h44m13s","sessions":["Motion and Animated Notions"],"time_stamp":"2024-10-17T17:45:00Z","title":"Motion-Based Visual Encoding Can Improve Performance on Perceptual Tasks with Dynamic Time Series","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1451","abstract":"We present a systematic review, an empirical study, and a first set of considerations for designing visualizations in motion, derived from a concrete scenario in which these visualizations were used to support a primary task. In practice, when viewers are confronted with embedded visualizations, they often have to focus on a primary task and can only quickly glance at a visualization showing rich, often dynamically updated, information. As such, the visualizations must be designed so as not to distract from the primary task, while at the same time being readable and useful for aiding the primary task. For example, in games, players who are engaged in a battle have to look at their enemies but also read the remaining health of their own game character from the health bar over their character's head. Many trade-offs are possible in the design of embedded visualizations in such dynamic scenarios, which we explore in-depth in this paper with a focus on user experience. We use video games as an example of an application context with a rich existing set of visualizations in motion. We begin our work with a systematic review of in-game visualizations in motion. Next, we conduct an empirical user study to investigate how different embedded visualizations in motion designs impact user experience. We conclude with a set of considerations and trade-offs for designing visualizations in motion more broadly as derived from what we learned about video games. 
All supplemental materials of this paper are available at osf.io/3v8wm/.","accessible_pdf":true,"authors":[{"affiliations":["Xi'an Jiaotong-Liverpool University, Suzhou, China","Universit\u00e9 Paris-Saclay, CNRS, Inria, Gif-sur-Yvette, France"],"email":"yaolijie0219@gmail.com","is_corresponding":true,"name":"Lijie Yao"},{"affiliations":["Univerisit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"federicabucchieri@gmail.com","is_corresponding":false,"name":"Federica Bucchieri"},{"affiliations":["Carleton University, Ottawa, Canada"],"email":"dieselfish@gmail.com","is_corresponding":false,"name":"Victoria McArthur"},{"affiliations":["LISN, Universit\u00e9 Paris-Saclay, CNRS, INRIA, Orsay, France"],"email":"anastasia.bezerianos@universite-paris-saclay.fr","is_corresponding":false,"name":"Anastasia Bezerianos"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"petra.isenberg@inria.fr","is_corresponding":false,"name":"Petra Isenberg"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1451","image_caption":"Three situated visualizations tested in our game RobotLife: Left - a horizontal bar chart positioned outside of the game enemy character, Center - a vertical bar chart integrated in the texture of the game enemy character, and Right - a circular bar chart (donut chart) partially match to the design of game enemy character.","keywords":["Situated visualization, visualization in motion, design considerations"],"open_access_supplemental_link":"https://osf.io/3v8wm/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"http://arxiv.org/abs/2408.01991","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1451/v-full-1451_Preview.mp4?token=vn2MF9S6BIWPnnQW9jdGn6mw5twlPZQJCJeUv_PSqLI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1451/v-full-1451_Preview.srt?token=3ZLLU9lFYcU3KI9gdD-fMPf0fksfp4GKOwUShVB7RhM&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full6","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"Motion and Animated Notions","session_uid":"v-full","session_youtube_ff_id":"X9GOtQyXfx8","session_youtube_ff_link":"https://youtu.be/X9GOtQyXfx8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/7Y2cPfXGiAY&t=0h20m37s","sessions":["Motion and Animated Notions"],"time_stamp":"2024-10-17T18:09:00Z","title":"User Experience of Visualizations in Motion: A Case Study and Design Considerations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1192","abstract":"Narrative visualization has become a crucial tool in data presentation, merging storytelling with data visualization to convey complex information in an engaging and accessible manner. In this study, we review the design space for narrative visualizations, focusing on animation style, through a comprehensive analysis of 80 papers from key visualization venues. We categorize these papers into six broad themes: Animation Style, Interactivity, Technology Usage, Methodology Development, Evaluation Type, and Application Domain. 
Our findings reveal a significant evolution in the field, marked by a growing preference for animated and non-interactive techniques. This trend reflects a shift towards minimizing user interaction while enhancing the clarity and impact of data presentation. We also identified key trends and technologies shaping the field, highlighting the role of technologies, such as machine learning in driving these changes. We offer insights into the dynamic interrelations within the narrative visualization domains, and suggest future research directions, including exploring non-interactive techniques, examining the interplay between different visualization elements, and developing domain-specific visualizations.","accessible_pdf":false,"authors":[{"affiliations":["Louisiana State University, Baton Rouge, United States"],"email":"jyang44@lsu.edu","is_corresponding":true,"name":"Vyri Junhan Yang"},{"affiliations":["Louisiana State University, Baton Rouge, United States"],"email":"mjasim@lsu.edu","is_corresponding":false,"name":"Mahmood Jasim"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1192","image_caption":"We explore the design space of narrative visualization, focusing on animation styles. We categorize 80 papers from top visualization venues into six categories, including Animation Style, Interactivity, Methodology, Technology, Evaluation Type , and Application Domain. We discuss the interplay between different visualization techniques and elements and the trend to focus on domain-specific visualizations.","keywords":["Narrative visualizations, static and animated visualization, categorization, design space"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1192/v-short-1192_Preview.mp4?token=wSAJAGdGX1p1H0svxN5jho_bPjtmkIq3M03qDyIOPPM&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1192/v-short-1192_Preview.srt?token=XE-6-nE7biDRyNXm7vjOIvCIUqitl1KrzsCI4ykg1rk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full6","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"Motion and Animated Notions","session_uid":"v-short","session_youtube_ff_id":"6oCqQbTXScg","session_youtube_ff_link":"https://youtu.be/6oCqQbTXScg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/7Y2cPfXGiAY&t=1h9m53s","sessions":["Motion and Animated Notions"],"time_stamp":"2024-10-17T18:45:00Z","title":"Animating the Narrative: A Review of Animation Styles in Narrative Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20223193756","abstract":"Information visualization uses various types of representations to encode data into graphical formats. Prior work on visualization techniques has evaluated the accuracy of perceived numerical data values from visual data encodings such as graphical position, length, orientation, size, and color. Our work aims to extend the research of graphical perception to the use of motion as data encodings for quantitative values. 
We present two experiments implementing multiple fundamental aspects of motion such as type, speed, and synchronicity that can be used for numerical value encoding as well as comparing motion to static visual encodings in terms of user perception and accuracy. We studied how well users can assess the differences between several types of motion and static visual encodings and present an updated ranking of accuracy for quantitative judgments. Our results indicate that non-synchronized motion can be interpreted more quickly and more accurately than synchronized motion. Moreover, our ranking of static and motion visual representations shows that motion, especially expansion and translational types, has great potential as a data encoding technique for quantitative value. Finally, we discuss the implications for the use of animation and motion for numerical representations in data visualization.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Shaghayegh Esmaeili"},{"affiliations":"","email":"","is_corresponding":false,"name":"Samia Kabir"},{"affiliations":"","email":"","is_corresponding":false,"name":"Anthony M. Colas"},{"affiliations":"","email":"","is_corresponding":false,"name":"Rhema P. Linder"},{"affiliations":"","email":"","is_corresponding":false,"name":"Eric D. Ragan"}],"award":"","doi":"10.1109/TVCG.2022.3193756","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20223193756","image_caption":"This preview image compares static and motion-based data encoding techniques for quantitative values. The top row shows static encodings, including area, color, angle, position, and length. The bottom row illustrates dynamic motion encodings: expansion, vibration, flicker, and vertical motion. Arrows indicate the direction of movement, emphasizing the dynamic nature of these motion-based visualizations. The image highlights how different visual properties--both static and motion-based--can be used for graphical perception and accuracy in data interpretation. ","keywords":["Information visualization, animation and motion-related techniques, empirical study, graphical perception, evaluation."],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20223193756/v-tvcg-20223193756_Preview.mp4?token=rnzL2n9Tqn4SX1r7OWdTjWOrzDRa2-ZKCQOPcHox-B4&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full6","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"Motion and Animated Notions","session_uid":"v-tvcg","session_youtube_ff_id":"xUeevjCLhns","session_youtube_ff_link":"https://youtu.be/xUeevjCLhns","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/7Y2cPfXGiAY&t=0h1m15s","sessions":["Motion and Animated Notions"],"time_stamp":"2024-10-17T17:57:00Z","title":"Evaluating Graphical Perception of Visual Motion for Quantitative Data Encoding","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233341990","abstract":"We report on challenges and considerations for supporting design processes for visualizations in motion embedded in sports videos. 
We derive our insights from analyzing swimming race visualizations and motion-related data, building a technology probe, as well as a study with designers. Understanding how to design situated visualizations in motion is important for a variety of contexts. Competitive sports coverage, in particular, increasingly includes information on athlete or team statistics and records. Although moving visual representations attached to athletes or other targets are starting to appear, systematic investigations on how to best support their design process in the context of sports videos are still missing. Our work makes several contributions in identifying opportunities for visualizations to be added to swimming competition coverage but, most importantly, in identifying requirements and challenges for designing situated visualizations in motion. Our investigations include the analysis of a survey with swimming enthusiasts on their motion-related information needs, an ideation workshop to collect designs and elicit design challenges, the design of a technology probe that allows to create embedded visualizations in motion based on real data, and an evaluation with visualization designers that aimed to understand the benefits of designing directly on videos.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Lijie Yao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Romain Vuillemot"},{"affiliations":"","email":"","is_corresponding":false,"name":"Anastasia Bezerianos"},{"affiliations":"","email":"","is_corresponding":false,"name":"Petra Isenberg"}],"award":"","doi":"10.1109/TVCG.2023.3341990","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233341990","image_caption":"Embedded representations added to a swimming video of the 2021 French Championship using our technology probe. These show dynamically updating visualizations that move with the swimmers: distance to the leader and predicted winner (left), speed distance to a personal record (top right), and current speed and swimmers' ages (bottom right). 
The left and bottom right images also show stationary embedded representations of the swimmers' names, nationality, and elapsed time.","keywords":["Data visualization, Sports, Videos, Probes, Surveys, Authoring systems, Games, Design framework, Embedded visualization, Sports analytics, Visualization in motion"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233341990/v-tvcg-20233341990_Preview.mp4?token=UhovQpwtoTs04VevSR-D-3grQfz_P_1fjxzRIrcoJvg&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233341990/v-tvcg-20233341990_Preview.srt?token=06mGz18pkPvSVcT0qmxfzeab_bc7Yvju_kHMUBDGMso&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full6","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"Motion and Animated Notions","session_uid":"v-tvcg","session_youtube_ff_id":"lFf8sM52rMc","session_youtube_ff_link":"https://youtu.be/lFf8sM52rMc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/7Y2cPfXGiAY&t=0h33m31s","sessions":["Motion and Animated Notions"],"time_stamp":"2024-10-17T18:21:00Z","title":"Designing for Visualization in Motion: Embedding Visualizations in Swimming Videos","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1272","abstract":"In various scientific and industrial domains, analyzing multivariate spatial data, i.e., vectors associated with spatial locations, is common practice. To analyze those datasets, analysts may turn to methods such as Spatial Blind Source Separation (SBSS). Designed explicitly for spatial data analysis, SBSS finds latent components in the dataset and is superior to popular non-spatial methods, like PCA. However, when analysts try different tuning parameter settings, the amount of latent components complicates analytical tasks. Based on our years-long collaboration with SBSS researchers, we propose a visualization approach to tackle this challenge. The main component is UnDRground Tubes (UT), a general-purpose idiom combining ideas from set visualization and multidimensional projections. We describe the UT visualization pipeline and integrate UT into an interactive multiple-view system. We demonstrate its effectiveness through interviews with SBSS experts, a qualitative evaluation with visualization experts, and computational experiments. SBSS experts were excited about our approach. They saw many benefits for their work and potential applications for geostatistical data analysis more generally. UT was also well received by visualization experts. 
Our benchmarks show that UT projections and its heuristics are appropriate.","accessible_pdf":false,"authors":[{"affiliations":["TU Wien, Vienna, Austria"],"email":"nikolaus.piccolotto@tuwien.ac.at","is_corresponding":false,"name":"Nikolaus Piccolotto"},{"affiliations":["TU Wien, Vienna, Austria"],"email":"mwallinger@ac.tuwien.ac.at","is_corresponding":true,"name":"Markus Wallinger"},{"affiliations":["Institute of Visual Computing and Human-Centered Technology, Vienna, Austria"],"email":"miksch@ifs.tuwien.ac.at","is_corresponding":false,"name":"Silvia Miksch"},{"affiliations":["TU Wien, Vienna, Austria"],"email":"markus.boegl@tuwien.ac.at","is_corresponding":false,"name":"Markus B\u00f6gl"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1272","image_caption":"The main component of our visualization approach is UnDRground Tubes, which presents glyphs in a grid and connects them by lines according to their set memberships. ","keywords":["Geographical data, multivariate data, set visualization, visual cluster analysis."],"open_access_supplemental_link":"https://osf.io/c7yga/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/zgphx","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1272/v-full-1272_Preview.mp4?token=v8VZzcOwmzW0CadxHJpyhrgdUUg2hMCG4nfe7dEqYWw&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1272/v-full-1272_Preview.srt?token=dukv0aGEH_NoG8urQc9zM3j2VN3RPsA_FK9_KDg_39w&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Dimensionality Reduction","session_uid":"v-full","session_youtube_ff_id":"JAizrYjsDB8","session_youtube_ff_link":"https://youtu.be/JAizrYjsDB8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/oBQXVSnxy5g&t=0h1m3s","sessions":["Dimensionality Reduction"],"time_stamp":"2024-10-16T14:15:00Z","title":"UnDRground Tubes: Exploring Spatial Data With Multidimensional Projections and Set Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1568","abstract":"Dimensionality reduction techniques are widely used for visualizing high-dimensional data. However, support for interpreting patterns of dimension reduction results in the context of the original data space is often insufficient. Consequently, users may struggle to extract insights from the projections. In this paper, we introduce DimBridge, a visual analytics tool that allows users to interact with visual patterns in a projection and retrieve corresponding data patterns. DimBridge supports several interactions, allowing users to perform various analyses, from contrasting multiple clusters to explaining complex latent structures. Leveraging first-order predicate logic, DimBridge identifies subspaces in the original dimensions relevant to a queried pattern and provides an interface for users to visualize and interact with them. 
We demonstrate how DimBridge can help users overcome the challenges associated with interpreting visual patterns in projections.","accessible_pdf":false,"authors":[{"affiliations":["Tufts University, Medford, United States"],"email":"brianmontambault@gmail.com","is_corresponding":true,"name":"Brian Montambault"},{"affiliations":["Tufts University, Medford, United States"],"email":"gabriel.appleby@gmail.com","is_corresponding":false,"name":"Gabriel Appleby"},{"affiliations":["Tufts University, Boston, United States"],"email":"jen@cs.tufts.edu","is_corresponding":false,"name":"Jen Rogers"},{"affiliations":["Tufts University, Medford, United States"],"email":"camelia_daniela.brumar@tufts.edu","is_corresponding":false,"name":"Camelia D. Brumar"},{"affiliations":["Vanderbilt University, Nashville, United States"],"email":"mingwei.li@tufts.edu","is_corresponding":false,"name":"Mingwei Li"},{"affiliations":["Tufts University, Medford, United States"],"email":"remco@cs.tufts.edu","is_corresponding":false,"name":"Remco Chang"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1568","image_caption":"DimBridge helps users understand visual patterns in dimensionality reduction-based 2D projections by identifying relevant subsets of the high-dimensional space.","keywords":["Predicates, Dimensionality Reduction, Explainable Machine Learning"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1568/v-full-1568_Preview.mp4?token=4rHbL9zypOPcv4QmTnYnprbl4RS8YStj6BPAK4rizBA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1568/v-full-1568_Preview.srt?token=SOI8J1E388R3ADgh6IACzWOJhwfdCSQSGl8g139fTR0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Dimensionality Reduction","session_uid":"v-full","session_youtube_ff_id":"tH3ik7KCn0A","session_youtube_ff_link":"https://youtu.be/tH3ik7KCn0A","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/oBQXVSnxy5g&t=0h27m3s","sessions":["Dimensionality Reduction"],"time_stamp":"2024-10-16T14:39:00Z","title":"DimBridge: Interactive Explanation of Visual Patterns in Dimensionality Reductions with Predicate Logic","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1612","abstract":"Partitionings (or segmentations) divide a given domain into disjoint connected regions whose union forms again the entire domain. Multi-dimensional partitionings occur, for example, when analyzing parameter spaces of simulation models, where each segment of the partitioning represents a region of similar model behavior. Having computed a partitioning, one is commonly interested in understanding how large the segments are and which segments lie next to each other. While visual representations of 2D domain partitionings that reveal sizes and neighborhoods are straightforward, this is no longer the case when considering multi-dimensional domains of three or more dimensions. We propose an algorithm for computing 2D embeddings of multi-dimensional partitionings. 
The embedding shall have the following properties: It shall maintain the topology of the partitioning and optimize the area sizes and joint boundary lengths of the embedded segments to match the respective sizes and lengths in the multi-dimensional domain. We demonstrate the effectiveness of our approach by applying it to different use cases, including the visual exploration of 3D spatial domain segmentations and multi-dimensional parameter space partitionings of simulation ensembles. We numerically evaluate our algorithm with respect to how well sizes and lengths are preserved depending on the dimensionality of the domain and the number of segments. ","accessible_pdf":true,"authors":[{"affiliations":["University of M\u00fcnster, M\u00fcnster, Germany"],"email":"m_ever14@uni-muenster.de","is_corresponding":true,"name":"Marina Evers"},{"affiliations":["University of M\u00fcnster, M\u00fcnster, Germany"],"email":"linsen@uni-muenster.de","is_corresponding":false,"name":"Lars Linsen"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1612","image_caption":"We present an approach for visualizing a multi-dimensional partitioning in a 2D embedding. Each segment in the embedding corresponds to a multi-dimensional segment of the given partitioning. A multi-dimensional partitioning is modeled as a graph that is embedded into a 2D plane. The graph embedding is used as a starting point for a cellular automaton approach to compute a 2D embedding of the multi-dimensional embedding preserving topology, area, and boundary length. To its outcome, we apply a rendering that highlights relevant features.","keywords":["Multi-dimensional partitionings, segmentations, dimensionality reduction, parameter space visualization."],"open_access_supplemental_link":"https://github.com/marinaevers/segmentation-projection","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"http://arxiv.org/abs/2408.03641","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1612/v-full-1612_Preview.mp4?token=EUgVBfrye42pazKDBfY0V5t7uuwIUzDQgiF4rdtXwRw&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Dimensionality Reduction","session_uid":"v-full","session_youtube_ff_id":"91i3yDeIi38","session_youtube_ff_link":"https://youtu.be/91i3yDeIi38","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/oBQXVSnxy5g&t=0h38m37s","sessions":["Dimensionality Reduction"],"time_stamp":"2024-10-16T14:51:00Z","title":"2D Embeddings of Multi-dimensional Partitionings","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1632","abstract":"High-dimensional data, characterized by many features, can be difficult to visualize effectively. Dimensionality reduction techniques, such as PCA, UMAP, and t-SNE, address this challenge by projecting the data into a lower-dimensional space while preserving important relationships. TopoMap is another technique that excels at preserving the underlying structure of the data, leading to interpretable visualizations. 
In particular, TopoMap maps the high-dimensional data into a visual space, guaranteeing that the 0-dimensional persistence diagram of the Rips filtration of the visual space matches the one from the high-dimensional data. However, the original TopoMap algorithm can be slow and its layout can be too sparse for large and complex datasets. In this paper, we propose three improvements to TopoMap: 1) a more space-efficient layout, 2) a significantly faster implementation, and 3) a novel TreeMap-based representation that makes use of the topological hierarchy to aid the exploration of the projections.These advancements make TopoMap, now referred to as TopoMap++, a more powerful tool for visualizing high-dimensional data which we demonstrate through different use case scenarios.","accessible_pdf":false,"authors":[{"affiliations":["New York University, New York City, United States"],"email":"vitoriaguardieiro@gmail.com","is_corresponding":true,"name":"Vitoria Guardieiro"},{"affiliations":["New York University, New York City, United States"],"email":"felipedeoliveira1407@gmail.com","is_corresponding":false,"name":"Felipe Inagaki de Oliveira"},{"affiliations":["Microsoft Research India, Bangalore, India"],"email":"harish.doraiswamy@microsoft.com","is_corresponding":false,"name":"Harish Doraiswamy"},{"affiliations":["University of Sao Paulo, Sao Carlos, Brazil"],"email":"gnonato@icmc.usp.br","is_corresponding":false,"name":"Luis Gustavo Nonato"},{"affiliations":["New York University, New York City, United States"],"email":"csilva@nyu.edu","is_corresponding":false,"name":"Claudio Silva"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1632","image_caption":"Representations of the MNIST database of handwritten digits. (a) This data is projected using TopoMap. (b) The hierarchy defined by the process of topological simplification is visualized as a TreeMap. Each leaf of this tree corresponds to the smallest simplified component with a user-defined minimum number of points. (c) The TopoMap++ representation of the same data where the eleven components selected by the TreeMap are highlighted. As can be seen, TopoMap++ makes much more efficient use of the space compared to TopoMap, thus allowing users to easily analyze the relationships between the different clusters. 
","keywords":["Topological data analysis, Computational topology, High-dimensional data, Projection."],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1632/v-full-1632_Preview.mp4?token=NL73IBsAgaCsJAwcWrdzocAOZp8ABJQsqvlxLuVM_aE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1632/v-full-1632_Preview.srt?token=nnuxA1POcFJtGJo3RfG9Z26Htjb_KbUYaTm9HT9mwH0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Dimensionality Reduction","session_uid":"v-full","session_youtube_ff_id":"RHAnJMEbOOQ","session_youtube_ff_link":"https://youtu.be/RHAnJMEbOOQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/oBQXVSnxy5g&t=1h3m43s","sessions":["Dimensionality Reduction"],"time_stamp":"2024-10-16T15:15:00Z","title":"TopoMap++: A faster and more space efficient technique to compute projections with topological guarantees","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233324851","abstract":"Dimensionality reduction (DR) algorithms are diverse and widely used for analyzing high-dimensional data. Various metrics and tools have been proposed to evaluate and interpret the DR results. However, most metrics and methods fail to be well generalized to measure any DR results from the perspective of original distribution fidelity or lack interactive exploration of DR results. There is still a need for more intuitive and quantitative analysis to interactively explore high-dimensional data and improve interpretability. We propose a metric and a generalized algorithm-agnostic approach based on the concept of capacity to evaluate and analyze the DR results. Based on our approach, we develop a visual analytic system HiLow for exploring high-dimensional data and projections. We also propose a mixed-initiative recommendation algorithm that assists users in interactively DR results manipulation. Users can compare the differences in data distribution after the interaction through HiLow. Furthermore, we propose a novel visualization design focusing on quantitative analysis of differences between high and low-dimensional data distributions. Finally, through user study and case studies, we validate the effectiveness of our approach and system in enhancing the interpretability of projections and analyzing the distribution of high and low-dimensional data.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Yang Zhang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jisheng Liu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Chufan Lai"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yuan Zhou"},{"affiliations":"","email":"","is_corresponding":true,"name":"Siming Chen"}],"award":"","doi":"10.1109/TVCG.2023.3324851","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233324851","image_caption":"Dimensionality reduction (DR) algorithms are diverse and widely used for analyzing high-dimensional data. 
We propose a metric and a generalized algorithm-agnostic approach based on the concept of capacity to evaluate and analyze the DR results. Based on our approach, we develop a visual analytic system HiLow for exploring high-dimensional data and projections. We also propose a mixed-initiative recommendation algorithm that assists users in interactively DR results manipulation. Users can compare the differences in data distribution after the interaction through HiLow. Furthermore, we propose a novel visualization design focusing on quantitative analysis of differences between high and low-dimensional data distributions. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233324851/v-tvcg-20233324851_Preview.mp4?token=DkmstbWOPNnXDNMazsNNAd6L4fkboZage8zf-mcmILc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233324851/v-tvcg-20233324851_Preview.srt?token=qwSdAz2hsKoC0m49tsZsR13eFd7B9mGqYLf3c4o_7MM&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Dimensionality Reduction","session_uid":"v-tvcg","session_youtube_ff_id":"q2ETleQA0KE","session_youtube_ff_link":"https://youtu.be/q2ETleQA0KE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/oBQXVSnxy5g&t=0h14m13s","sessions":["Dimensionality Reduction"],"time_stamp":"2024-10-16T14:27:00Z","title":"Interpreting High-Dimensional Projections With Capacity","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243364841","abstract":"The need to understand the structure of hierarchical or high-dimensional data is present in a variety of fields. Hyperbolic spaces have proven to be an important tool for embedding computations and analysis tasks as their non-linear nature lends itself well to tree or graph data. Subsequently, they have also been used in the visualization of high-dimensional data, where they exhibit increased embedding performance. However, none of the existing dimensionality reduction methods for embedding into hyperbolic spaces scale well with the size of the input data. That is because the embeddings are computed via iterative optimization schemes and the computation cost of every iteration is quadratic in the size of the input. Furthermore, due to the non-linear nature of hyperbolic spaces, Euclidean acceleration structures cannot directly be translated to the hyperbolic setting. This paper introduces the first acceleration structure for hyperbolic embeddings, building upon a polar quadtree. We compare our approach with existing methods and demonstrate that it computes embeddings of similar quality in significantly less time. Implementation and scripts for the experiments can be found at this https URL.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Martin Skrodzki"},{"affiliations":"","email":"","is_corresponding":false,"name":"Hunter van Geffen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Nicolas F. 
Chaves-de-Plaza"},{"affiliations":"","email":"","is_corresponding":false,"name":"Thomas H\u00f6llt"},{"affiliations":"","email":"","is_corresponding":false,"name":"Elmar Eisemann"},{"affiliations":"","email":"","is_corresponding":false,"name":"Klaus Hildebrandt"}],"award":"","doi":"10.1109/TVCG.2024.3364841","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243364841","image_caption":"An embedding of the C.Elegans data set with colored clusters on the right. Left shows an overlay of our tree acceleration structure. The red mark indicates the query point where the grid resolution is high, whereas it is low everywhere else in the embedding. This speeds up embedding computations significantly.","keywords":["Human-Computer Interaction (cs.HC); Artificial Intelligence (cs.AI); Machine Learning (cs.LG); Quantitative Methods (q-bio.QM); Machine Learning (stat.ML) Dimensionality reduction, t-SNE, hyperbolic embedding, acceleration structure"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243364841/v-tvcg-20243364841_Preview.mp4?token=OvZmub6iUHlYHZyKQa-ZeE_96Y2orscgxFdhA1RHjg4&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Dimensionality Reduction","session_uid":"v-tvcg","session_youtube_ff_id":"QwwSaWLUn_c","session_youtube_ff_link":"https://youtu.be/QwwSaWLUn_c","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/oBQXVSnxy5g&t=0h51m40s","sessions":["Dimensionality Reduction"],"time_stamp":"2024-10-16T15:03:00Z","title":"Accelerating hyperbolic t-SNE","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1153","abstract":"Points of interest on a map such as restaurants, hotels, or subway stations, give rise to categorical point data: data that have a fixed location and one or more categorical attributes. Consequently, recent years have seen various set visualization approaches that visually connect points of the same category to support users in understanding the spatial distribution of categories. Existing methods use complex and often highly irregular shapes to connect points of the same category, leading to high cognitive load for the user. In this paper we introduce SimpleSets, which uses simple shapes to enclose categorical point patterns, thereby providing a clean overview of the data distribution. SimpleSets is designed to visualize sets of points with a single categorical attribute; as a result, the point patterns enclosed by SimpleSets form a partition of the data. We give formal definitions of point patterns that correspond to simple shapes and describe an algorithm that partitions categorical points into few such patterns. Our second contribution is a rendering algorithm that transforms a given partition into a clean set of shapes resulting in an aesthetically pleasing set visualization. Our algorithm pays particular attention to resolving intersections between nearby shapes in a consistent manner. 
We compare SimpleSets to the state-of-the-art set visualizations using standard datasets from the literature.","accessible_pdf":false,"authors":[{"affiliations":["TU Eindhoven, Eindhoven, Netherlands"],"email":"s.w.v.d.broek@tue.nl","is_corresponding":true,"name":"Steven van den Broek"},{"affiliations":["TU Eindhoven, Eindhoven, Netherlands"],"email":"w.meulemans@tue.nl","is_corresponding":false,"name":"Wouter Meulemans"},{"affiliations":["TU Eindhoven, Eindhoven, Netherlands"],"email":"b.speckmann@tue.nl","is_corresponding":false,"name":"Bettina Speckmann"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1153","image_caption":"A SimpleSets visualization of mills around Leeuwarden, The Netherlands. The mill types are: angular mill (blue); vertical wind engine (green); spider head mill (orange); and tjasker (purple). Data by https://molendatabase.nl with permission, map from https://www.openstreetmap.org/copyright.","keywords":["Set visualization, geographic visualization, algorithms"],"open_access_supplemental_link":"https://doi.org/10.5281/zenodo.12784670","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.14433","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1153/v-full-1153_Preview.mp4?token=Z6BFddSYmiI-39mV2cow1IHd-5BfUyXrkwS1rpLjwqs&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1153/v-full-1153_Preview.srt?token=29nzWoOd9pyyM4jTtZLCrsKC0TNf4f_WXUBk5eZN3cM&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full8","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Urban Planning, Construction, and Disaster Management","session_uid":"v-full","session_youtube_ff_id":"vZk9Sm6PIIo","session_youtube_ff_link":"https://youtu.be/vZk9Sm6PIIo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/vNaxXisbG4Y&t=1h4m2s","sessions":["Urban Planning, Construction, and Disaster Management"],"time_stamp":"2024-10-16T15:15:00Z","title":"SimpleSets: Capturing Categorical Point Patterns with Simple Shapes","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1307","abstract":"Building Information Modeling (BIM) describes a central data pool covering the entire life cycle of a construction project. Similarly, Building Energy Modeling (BEM) describes the process of using a 3D representation of a building as a basis for thermal simulations to assess the building\u2019s energy performance. This paper explores the intersection of BIM and BEM, focusing on the challenges and methodologies in converting BIM data into BEM representations for energy performance analysis. BEMTrace integrates 3D data wrangling techniques with visualization methodologies to enhance the accuracy and traceability of the BIM-to-BEM conversion process. Through parsing, error detection, and algorithmic correction of BIM data, our methods generate valid BEM models suitable for energy simulation. Visualization techniques provide transparent insights into the conversion process, aiding error identification, validation, and user comprehension. 
We introduce context-adaptive selections to facilitate user interaction and to show that the BEMTrace workflow helps users understand complex 3D data wrangling processes.","accessible_pdf":true,"authors":[{"affiliations":["VRVis Zentrum f\u00fcr Virtual Reality und Visualisierung Forschungs-GmbH, Vienna, Austria"],"email":"walch@vrvis.at","is_corresponding":true,"name":"Andreas Walch"},{"affiliations":["VRVis Zentrum f\u00fcr Virtual Reality und Visualisierung Forschungs-GmbH, Vienna, Austria"],"email":"szabo@vrvis.at","is_corresponding":false,"name":"Attila Szabo"},{"affiliations":["VRVis Zentrum f\u00fcr Virtual Reality und Visualisierung Forschungs-GmbH, Vienna, Austria"],"email":"hs@vrvis.at","is_corresponding":false,"name":"Harald Steinlechner"},{"affiliations":["Independent Researcher, Vienna, Austria"],"email":"thomas@ortner.fyi","is_corresponding":false,"name":"Thomas Ortner"},{"affiliations":["Institute of Visual Computing "," Human-Centered Technology, Vienna, Austria"],"email":"groeller@cg.tuwien.ac.at","is_corresponding":false,"name":"Eduard Gr\u00f6ller"},{"affiliations":["VRVis Zentrum f\u00fcr Virtual Reality und Visualisierung Forschungs-GmbH, Vienna, Austria"],"email":"johanna.schmidt@vrvis.at","is_corresponding":false,"name":"Johanna Schmidt"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1307","image_caption":"BEMTrace enhances the data curation process from a Building Information Model (BIM) to a Building Energy Model (BEM) by providing visual support for the BIM-to-BEM conversion. Users can access various views to better understand the complex data transformation, including the BIM World, BEM World, and the Relationship View, which illustrates the transition between them. Context-adaptive selections assist users in navigating these views, allowing for detailed exploration of different data aspects. 
This approach ensures a clearer understanding of the conversion process and helps in resolving any arising conflicts.","keywords":["BIM, BEM, BIM-to-BEM, 3D Data Wrangling, 3D selections, Visualization for trust building"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.19464","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1307/v-full-1307_Preview.mp4?token=lopxc4Sly_btDsX1Mo-dcgzS-HTbPGfRVSfPfSX0QbI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1307/v-full-1307_Preview.srt?token=jJddu6bRbUOyN7zSlfWx89aCEUqHMLLlhABaav6_M6w&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full8","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Urban Planning, Construction, and Disaster Management","session_uid":"v-full","session_youtube_ff_id":"AwIuPtpFz-k","session_youtube_ff_link":"https://youtu.be/AwIuPtpFz-k","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/vNaxXisbG4Y&t=0h14m42s","sessions":["Urban Planning, Construction, and Disaster Management"],"time_stamp":"2024-10-16T14:27:00Z","title":"BEMTrace: Visualization-driven approach for deriving Building Energy Models from BIM","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1681","abstract":"In recent years, the global adoption of electric vehicles (EVs) has surged, prompting a corresponding rise in the installation of charging stations. This proliferation has underscored the importance of expediting the deployment of charging infrastructure. Both academia and industry have thus devoted to addressing the charging station location problem (CSLP) to streamline this process. However, prevailing algorithms addressing CSLP are hampered by restrictive assumptions and computational overhead, leading to a dearth of comprehensive evaluations in the spatiotemporal dimensions. Consequently, their practical viability is restricted. Moreover, the placement of charging stations exerts a significant impact on both the road network and the power grid, which necessitates the evaluation of the potential post-deployment impacts on these interconnected networks holistically. In this study, we propose CSLens, a visual analytics system designed to inform charging station deployment decisions through the lens of coupled transportation and power networks. CSLens offers multiple visualizations and interactive features, empowering users to delve into the existing charging station layout, explore alternative deployment solutions, and assess the ensuring impact. To validate the efficacy of CSLens, we conducted two case studies and engaged in interviews with domain experts. Through these efforts, we substantiated the usability and practical utility of CSLens in enhancing the decision-making process surrounding charging station deployment. 
Our findings underscore CSLens\u2019s potential to serve as a valuable asset in navigating the complexities of charging infrastructure planning.","accessible_pdf":false,"authors":[{"affiliations":["Sun Yat-sen University, Shenzhen, China"],"email":"zhangyt85@mail2.sysu.edu.cn","is_corresponding":true,"name":"Yutian Zhang"},{"affiliations":["Sun Yat-sen University, Shenzhen, China"],"email":"xulw8@mail2.sysu.edu.cn","is_corresponding":false,"name":"Liwen Xu"},{"affiliations":["Sun Yat-sen University, Shenzhen, China"],"email":"taoshc@mail2.sysu.edu.cn","is_corresponding":false,"name":"Shaocong Tao"},{"affiliations":["Sun Yat-sen University, Shenzhen, China"],"email":"guanqx3@mail.sysu.edu.cn","is_corresponding":false,"name":"Quanxue Guan"},{"affiliations":["ShanghaiTech University, Shanghai, China"],"email":"liquan@shanghaitech.edu.cn","is_corresponding":false,"name":"Quan Li"},{"affiliations":["Sun Yat-sen University, Shenzhen, China"],"email":"zenghp5@mail.sysu.edu.cn","is_corresponding":false,"name":"Haipeng Zeng"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1681","image_caption":"CSLens facilitates the implementation of new charging stations within the coupled transportation and power networks. The Temporal Overview (A) analyzes the fluctuations in traffic hotspots and charging demand. In the Control Panel (B), users can adjust parameters to generate solutions for charging station deployment. The Charging Station Info (C) provides key attributes of charging stations. The Map View (D) furnishes detailed information on traffic volume, charging demand and charging stations. The Result View (E) and the Impact View (F) enable users to compare various solutions and evaluate their respective impacts on the road network and the power grid.","keywords":["Charging station location problem, Visual analytics, Decision-making"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1681/v-full-1681_Preview.mp4?token=3S6TVYPNim5-H1pVaXrfj9MMJ5GUV-fnBLAlWj5Cu9Y&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full8","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Urban Planning, Construction, and Disaster Management","session_uid":"v-full","session_youtube_ff_id":"qZcYIS995YE","session_youtube_ff_link":"https://youtu.be/qZcYIS995YE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/vNaxXisbG4Y&t=0h52m31s","sessions":["Urban Planning, Construction, and Disaster Management"],"time_stamp":"2024-10-16T15:03:00Z","title":"CSLens: Towards Better Deploying Charging Stations via Visual Analytics \u2014\u2014 A Coupled Networks Perspective","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233332511","abstract":"We present Submerse, an end-to-end framework for visualizing flooding scenarios on large and immersive display ecologies. Specifically, we reconstruct a surface mesh from input flood simulation data and generate a to-scale 3D virtual scene by incorporating geographical data such as terrain, textures, buildings, and additional scene objects. 
To optimize computation and memory performance for large simulation datasets, we discretize the data on an adaptive grid using dynamic quadtrees and support level-of-detail based rendering. Moreover, to provide a perception of flooding direction for a time instance, we animate the surface mesh by synthesizing water waves. As interaction is key for effective decision-making and analysis, we introduce two novel techniques for flood visualization in immersive systems: (1) an automatic scene-navigation method using optimal camera viewpoints generated for marked points-of-interest based on the display layout, and (2) an AR-based focus+context technique using an aux display system. Submerse is developed in collaboration between computer scientists and atmospheric scientists. We evaluate the effectiveness of our system and application by conducting workshops with emergency managers, domain experts, and concerned stakeholders in the Stony Brook Reality Deck, an immersive gigapixel facility, to visualize a superstorm flooding scenario in New York City.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Saeed Boorboor"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yoonsang Kim"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ping Hu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Josef Moses"},{"affiliations":"","email":"","is_corresponding":false,"name":"Brian Colle"},{"affiliations":"","email":"","is_corresponding":false,"name":"Arie E. Kaufman"}],"award":"","doi":"10.1109/TVCG.2023.3332511","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233332511","image_caption":"Submerse is an end-to-end framework for visualizing flooding scenarios on large and immersive display ecologies. It generates a to-scale 3D virtual scene by incorporating flood simulation data and geographical data such as terrain, textures, buildings, and additional scene objects. Submerse implements two novel techniques: (1) an automatic scene-navigation method using optimal camera viewpoints generated for marked points-of-interest based on the display layout, and (2) an AR-based focus+context technique using an aux display system. 
We demonstrate the system on the Stony Brook University Reality Deck.","keywords":["Camera navigation, flooding simulation visualization, immersive visualization, mixed reality"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233332511/v-tvcg-20233332511_Preview.mp4?token=x3Vbv2n6-cRii7JvFIx0dtFeY7hm1Yazmcnx8QrNYLo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233332511/v-tvcg-20233332511_Preview.srt?token=73so2ph6C7OcvEkc3GLhO-aehnd0Uv3yiXNlL1dR584&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full8","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Urban Planning, Construction, and Disaster Management","session_uid":"v-tvcg","session_youtube_ff_id":"CjTHaJsd0-8","session_youtube_ff_link":"https://youtu.be/CjTHaJsd0-8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/vNaxXisbG4Y&t=0h0m27s","sessions":["Urban Planning, Construction, and Disaster Management"],"time_stamp":"2024-10-16T14:15:00Z","title":"Submerse: Visualizing Storm Surge Flooding Simulations in Immersive Display Ecologies","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233333356","abstract":"As urban populations grow, effectively accessing urban performance measures such as livability and comfort becomes increasingly important due to their significant socioeconomic impacts. While Point of Interest (POI) data has been utilized for various applications in location-based services, its potential for urban performance analytics remains unexplored. In this paper, we present SenseMap, a novel approach for analyzing urban performance by leveraging POI data as a semantic representation of urban functions. We quantify the contribution of POIs to different urban performance measures by calculating semantic textual similarities on our constructed corpus. We propose Semantic-adaptive Kernel Density Estimation which takes into account POIs\u2019 in\ufb02uential areas across different Traf\ufb01c Analysis Zones and semantic contributions to generate semantic density maps for measures. We design and implement a feature-rich, real-time visual analytics system for users to explore the urban performance of their surroundings. Evaluations with human judgment and reference data demonstrate the feasibility and validity of our method. Usage scenarios and user studies demonstrate the capability, usability, and explainability of our system.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Juntong Chen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Qiaoyun Huang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Changbo Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Chenhui Li"}],"award":"","doi":"10.1109/TVCG.2023.3333356","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233333356","image_caption":"The user interface of SenseMap: A. The map view in exploration and filter states, displaying semantic maps, circular query targets, and filtered regions; B. 
The navigation view, enabling adjustments to regional query parameters and navigation between POIs; C. The comparison view facilitates the comparison and analysis of measures across urban areas.","keywords":["Urban data, semantic textual similarity, point of interest, density map, visual analytics, visualization design"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233333356/v-tvcg-20233333356_Preview.mp4?token=sneOhA3BxgnybyDtxPqOZGsuH5Gc6aaiCekUKYQBXkE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233333356/v-tvcg-20233333356_Preview.srt?token=EQI69AJ6rvTxPtwm3YQQlPjXgjGRLfZ88SnDz_dMKO0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full8","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Urban Planning, Construction, and Disaster Management","session_uid":"v-tvcg","session_youtube_ff_id":"S-OPwGCXsMo","session_youtube_ff_link":"https://youtu.be/S-OPwGCXsMo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/vNaxXisbG4Y&t=0h39m32s","sessions":["Urban Planning, Construction, and Disaster Management"],"time_stamp":"2024-10-16T14:51:00Z","title":"SenseMap: Urban Performance Visualization and Analytics via Semantic Textual Similarity","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243392587","abstract":"The issue of traffic congestion poses a significant obstacle to the development of global cities. One promising solution to tackle this problem is intelligent traffic signal control (TSC). Recently, TSC strategies leveraging reinforcement learning (RL) have garnered attention among researchers. However, the evaluation of these models has primarily relied on fixed metrics like reward and queue length. This limited evaluation approach provides only a narrow view of the model\u2019s decision-making process, impeding its practical implementation. Moreover, effective TSC necessitates coordinated actions across multiple intersections. Existing visual analysis solutions fall short when applied in multi-agent settings. In this study, we delve into the challenge of interpretability in multi-agent reinforcement learning (MARL), particularly within the context of TSC. We propose MARLens, a visual analytics system tailored to understand MARL-based TSC. Our system serves as a versatile platform for both RL and TSC researchers. It empowers them to explore the model\u2019s features from various perspectives, revealing its decision-making processes and shedding light on interactions among different agents. To facilitate quick identification of critical states, we have devised multiple visualization views, complemented by a traffic simulation module that allows users to replay specific training scenarios. To validate the utility of our proposed system, we present three comprehensive case studies, incorporate insights from domain experts through interviews, and conduct a user study. 
These collective efforts underscore the feasibility and effectiveness of MARLens in enhancing our understanding of MARL-based TSC systems and pave the way for more informed and efficient traffic management strategies.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Yutian Zhang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Guohong Zheng"},{"affiliations":"","email":"","is_corresponding":false,"name":"Zhiyuan Liu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Quan Li"},{"affiliations":"","email":"","is_corresponding":false,"name":"Haipeng Zeng"}],"award":"","doi":"10.1109/TVCG.2024.3392587","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243392587","image_caption":"MARLens provides an in-depth analysis of reinforcement-learning-based traffic signal control. The Control Panel (A) presents parameters of the model. The Training Distribution (B) provides the distribution of metrics and ranks episodes. The Episode Overview (C) summarizes the traffic conditions and agents' policies at a certain episode. The Episode Detail (D) provides a summary for each agent in an episode, including states, actions and relationships among agents. The Policy Explainer (E) provides explanations between state and action. The Simulation Replay (F) supports the replay of an episode or time step. The Snapshot Log (G) saves the snapshots of the Policy Explainer.","keywords":["Traffic signal control, multi-agent, reinforcement learning, visual analytics"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243392587/v-tvcg-20243392587_Preview.mp4?token=y-4GRT4qfjHVR1kac6qvxMnL7ooqMxs-3oOePS5YQJs&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full8","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Urban Planning, Construction, and Disaster Management","session_uid":"v-tvcg","session_youtube_ff_id":"vGdbrKKW2V8","session_youtube_ff_link":"https://youtu.be/vGdbrKKW2V8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/vNaxXisbG4Y&t=0h26m35s","sessions":["Urban Planning, Construction, and Disaster Management"],"time_stamp":"2024-10-16T14:39:00Z","title":"MARLens: Understanding Multi-agent Reinforcement Learning for Traffic Signal Control via Visual Analytics","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1032","abstract":"Dynamic topic modeling is useful at discovering the development and change in latent topics over time. However, present methodology relies on algorithms that separate document and word representations. This prevents the creation of a meaningful embedding space where changes in word usage and documents can be directly analyzed in a temporal context. This paper proposes an expansion of the compass-aligned temporal Word2Vec methodology into dynamic topic modeling. Such a method allows for the direct comparison of word and document embeddings across time in dynamic topics. 
This enables the creation of visualizations that incorporate temporal word embeddings within the context of documents into topic visualizations. In experiments against the current state-of-the-art, our proposed method demonstrates overall competitive performance in topic relevancy and diversity across temporal datasets of varying size. Simultaneously, it provides insightful visualizations focused on temporal word embeddings while maintaining the insights provided by global topic evolution, advancing our understanding of how topics evolve over time.","accessible_pdf":false,"authors":[{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"d4n1elp@vt.edu","is_corresponding":true,"name":"Daniel Palamarchuk"},{"affiliations":["Virginia Polytechnic Institute of Technology , Blacksburg, United States"],"email":"lemaraw@vt.edu","is_corresponding":false,"name":"Lemara Williams"},{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"bmayer@cs.vt.edu","is_corresponding":false,"name":"Brian Mayer"},{"affiliations":["Savannah River National Laboratory, Aiken, United States"],"email":"thomas.danielson@srnl.doe.gov","is_corresponding":false,"name":"Thomas Danielson"},{"affiliations":["Tulane University, New Orleans, United States"],"email":"rfaust1@tulane.edu","is_corresponding":false,"name":"Rebecca Faust"},{"affiliations":["Savannah River National Laboratory, Aiken, United States"],"email":"larry.deschaine@srnl.doe.gov","is_corresponding":false,"name":"Larry M Deschaine PhD"},{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"north@vt.edu","is_corresponding":false,"name":"Chris North"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1032","image_caption":"We present the dynamic topic modeling method called Temporal Topic Embeddings with a Compass. The top-right image illustrates how this method effectively generates a plot of term movements within the context of documents and their associated topics. The outer image showcases TimeLink, a tool that compares word vectors in both global and local topic contexts. 
The red boxes correspond to the respective time periods: the time represented in the scatterplot and where that time is represented in the Sankey diagram.","keywords":["High dimensional data, Dynamic topic modeling, Cluster analysis"],"open_access_supplemental_link":"https://github.com/danilka4/ttec","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1032/v-full-1032_Preview.mp4?token=1legcsYWlMjPEyQIyMzGdETw2sOSGdikDiSu2YZj514&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1032/v-full-1032_Preview.srt?token=qsKLVKCIpveAxu1_viLv_l88bBusuLDQvhSGZpd_8uc&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full9","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Embeddings and Document Spatialization","session_uid":"v-full","session_youtube_ff_id":"49ktTLyplJc","session_youtube_ff_link":"https://youtu.be/49ktTLyplJc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/H85FqQyR25U&t=0h0m56s","sessions":["Embeddings and Document Spatialization"],"time_stamp":"2024-10-17T12:30:00Z","title":"Visualizing Temporal Topic Embeddings with a Compass","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1128","abstract":"Citations allow quickly identifying related research. If multiple publications are selected as seeds, specific suggestions for related literature can be made based on the number of incoming and outgoing citation links to this selection. Interactively adding recommended publications to the selection refines the next suggestion and incrementally builds a relevant collection of publications. Following this approach, the paper presents a search and foraging approach, PUREsuggest, which combines citation-based suggestions with augmented visualizations of the citation network. The focus and novelty of the approach is, first, the transparency of how the rankings are explained visually and, second, that the process can be steered through user-defined keywords, which reflect topics of interests. The system can be used to build new literature collections, to update and assess existing ones, as well as to use the collected literature for identifying relevant experts in the field. We evaluated the recommendation approach through simulated sessions and performed a user study investigating search strategies and usage patterns supported by the interface.","accessible_pdf":true,"authors":[{"affiliations":["University of Bamberg, Bamberg, Germany"],"email":"fabian.beck@uni-bamberg.de","is_corresponding":true,"name":"Fabian Beck"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1128","image_caption":"The figure showcases the PUREsuggest interface, a tool designed for citation-based literature search and visual exploration. The interface includes three main components: a list of currently selected publications, a list of suggested publications based on citation links, and a visualization of the citation network. 
Users can refine searches by adding publications and entering custom keywords to amplify specific research topics, facilitating an interactive and dynamic approach to discovering relevant literature.","keywords":["Scientific literature search, citation network visualization, visual recommender system."],"open_access_supplemental_link":"https://osf.io/94ebr/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.02508","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1128/v-full-1128_Preview.mp4?token=xhnURLvMH8Q9yodeDEVJde2vWGBMegY00mSFQlcFmj4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1128/v-full-1128_Preview.srt?token=v7vyN82iJ7g1kfkcvAnJRGj2dKDfBMOaUHZsR00BN3s&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full9","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Embeddings and Document Spatialization","session_uid":"v-full","session_youtube_ff_id":"obWhz2SJuzg","session_youtube_ff_link":"https://youtu.be/obWhz2SJuzg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/H85FqQyR25U&t=0h52m54s","sessions":["Embeddings and Document Spatialization"],"time_stamp":"2024-10-17T13:18:00Z","title":"PUREsuggest: Citation-based Literature Search and Visual Exploration with Keyword-controlled Rankings","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1489","abstract":"Projecting high-dimensional vectors into two dimensions for visualization, known as embedding visualization, facilitates perceptual reasoning and interpretation. Comparing multiple embedding visualizations drives decision-making in many domains, but traditional comparison methods are limited by a reliance on direct point correspondences. This requirement precludes comparisons without point correspondences, such as two different datasets of annotated images, and fails to capture meaningful higher-level relationships among point groups. To address these shortcomings, we propose a general framework for comparing embedding visualizations based on shared class labels rather than individual points. Our approach partitions points into regions corresponding to three key class concepts\u2014confusion, neighborhood, and relative size\u2014to characterize intra- and inter-class relationships. Informed by a preliminary user study, we implemented our framework using perceptual neighborhood graphs to define these regions and introduced metrics to quantify each concept.We demonstrate the generality of our framework with usage scenarios from machine learning and single-cell biology, highlighting our metrics' ability to draw insightful comparisons across label hierarchies. To assess the effectiveness of our approach, we conducted an evaluation study with five machine learning researchers and six single-cell biologists using an interactive and scalable prototype built with Python, JavaScript, and Rust. 
Our metrics enable more structured comparisons through visual guidance and increased participants\u2019 confidence in their findings.","accessible_pdf":true,"authors":[{"affiliations":["Harvard Medical School, Boston, United States"],"email":"trevor_manz@g.harvard.edu","is_corresponding":true,"name":"Trevor Manz"},{"affiliations":["Ozette Technologies, Seattle, United States"],"email":"f.lekschas@gmail.com","is_corresponding":false,"name":"Fritz Lekschas"},{"affiliations":["Ozette Technologies, Seattle, United States"],"email":"palmergreene@gmail.com","is_corresponding":false,"name":"Evan Greene"},{"affiliations":["Ozette Technologies, Seattle, United States"],"email":"greg@ozette.com","is_corresponding":false,"name":"Greg Finak"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"nils@hms.harvard.edu","is_corresponding":false,"name":"Nils Gehlenborg"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1489","image_caption":"Our framework addresses limitations in traditional embedding visualization comparisons by focusing on shared class labels rather than individual point correspondences. We characterize intra- and inter-class relationships through three key concepts: confusion, neighborhood, and relative size. Here, we contrast standard and transformed UMAP projections of biological data, showcasing healthy tissue vs cancer tissue embedding visualizations. Central panes with quantitative color encoding illustrate how our metrics quantify these concepts and guide comparisons exploration. This approach enables structured comparisons of diverse datasets, as demonstrated with machine learning and single-cell biology examples. Our interactive prototype facilitates insightful analysis of high-dimensional data projections, enhancing researchers' interpretation and confidence in their findings. 
","keywords":["visualization, comparison, high-dimensional data, dimensionality reduction, embeddings"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/puxnf","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1489/v-full-1489_Preview.mp4?token=r1czoxoMzgDBguv0DW_GGZSz9mE6Bydk9Vp-An3d2Is&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1489/v-full-1489_Preview.srt?token=bKAB3CZKUX_E4F02E59-aU0wJpguBNSh2AaQK1cctqA&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full9","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Embeddings and Document Spatialization","session_uid":"v-full","session_youtube_ff_id":"NOQMkUdisUc","session_youtube_ff_link":"https://youtu.be/NOQMkUdisUc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/H85FqQyR25U&t=0h13m34s","sessions":["Embeddings and Document Spatialization"],"time_stamp":"2024-10-17T12:42:00Z","title":"A General Framework for Comparing Embedding Visualizations Across Class-Label Hierarchies","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1603","abstract":"Multi-modal embeddings form the foundation for vision-language models, such as CLIP embeddings, the most widely used text-image embeddings. However, these embeddings are vulnerable to subtle misalignment of cross-modal features, resulting in decreased model performance and diminished generalization. To address this problem, we design ModalChorus, an interactive system for visual probing and alignment of multi-modal embeddings. ModalChorus primarily offers a two-stage process: 1) embedding probing with Modal Fusion Map (MFM), a novel parametric dimensionality reduction method that integrates both metric and nonmetric objectives to enhance modality fusion; and 2) embedding alignment that allows users to interactively articulate intentions for both point-set and set-set alignments. Quantitative and qualitative comparisons for CLIP embeddings with existing dimensionality reduction (e.g., t-SNE and MDS) and data fusion (e.g., data context map) methods demonstrate the advantages of MFM in showcasing cross-modal features over common vision-language datasets. 
Case studies reveal that ModalChorus can facilitate intuitive discovery of misalignment and efficient re-alignment in scenarios ranging from zero-shot classification to cross-modal retrieval and generation.","accessible_pdf":false,"authors":[{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"],"email":"yyebd@connect.ust.hk","is_corresponding":true,"name":"Yilin Ye"},{"affiliations":["The Hong Kong University of Science and Technology(Guangzhou), Guangzhou, China"],"email":"sxiao713@connect.hkust-gz.edu.cn","is_corresponding":false,"name":"Shishi Xiao"},{"affiliations":["the Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"],"email":"xingchen.zeng@outlook.com","is_corresponding":false,"name":"Xingchen Zeng"},{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China","The Hong Kong University of Science and Technology, Hong Kong SAR, China"],"email":"weizeng@hkust-gz.edu.cn","is_corresponding":false,"name":"Wei Zeng"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1603","image_caption":"ModalChorus supports multi-modal embeddings visualization with Modal Fusion Map and interactive alignment.","keywords":["Multi-modal embeddings, dimensionality reduction, data fusion, interactive alignment"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.12315","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1603/v-full-1603_Preview.mp4?token=U2SeRph0CmCty6EkL1awqGasK-mUH3IZYpTsKqatksE&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full9","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Embeddings and Document Spatialization","session_uid":"v-full","session_youtube_ff_id":"oJrEG0FkEYw","session_youtube_ff_link":"https://youtu.be/oJrEG0FkEYw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/H85FqQyR25U&t=0h28m21s","sessions":["Embeddings and Document Spatialization"],"time_stamp":"2024-10-17T12:54:00Z","title":"ModalChorus: Visual Probing and Alignment of Multi-modal Embeddings via Modal Fusion Map","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1770","abstract":"The semantic similarity between documents of a text corpus can be visualized using map-like metaphors based on two-dimensional scatterplot layouts. These layouts result from a dimensionality reduction on the document-term matrix or a representation within a latent embedding, including topic models. Thereby, the resulting layout depends on the input data and hyperparameters of the dimensionality reduction and is therefore affected by changes in them. Furthermore, the resulting layout is affected by changes in the input data and hyperparameters of the dimensionality reduction. However, such changes to the layout require additional cognitive efforts from the user. In this work, we present a sensitivity study that analyzes the stability of these layouts concerning (1) changes in the text corpora, (2) changes in the hyperparameter, and (3) randomness in the initialization. 
Our approach has two stages: data measurement and data analysis. First, we derived layouts for the combination of three text corpora and six text embeddings and a grid-search-inspired hyperparameter selection of the dimensionality reductions. Afterward, we quantified the similarity of the layouts through ten metrics, concerning local and global structures and class separation. Second, we analyzed the resulting 42817 tabular data points in a descriptive statistical analysis. From this, we derived guidelines for informed decisions on the layout algorithm and highlight specific hyperparameter settings. We provide our implementation as a Git repository at https://github.com/hpicgs/Topic-Models-and-Dimensionality-Reduction-Sensitivity-Study and results as Zenodo archive at https://doi.org/10.5281/zenodo.12772898.","accessible_pdf":false,"authors":[{"affiliations":["University of Potsdam, Digital Engineering Faculty, Hasso Plattner Institute, Potsdam, Germany"],"email":"daniel.atzberger@hpi.de","is_corresponding":true,"name":"Daniel Atzberger"},{"affiliations":["University of Potsdam, Potsdam, Germany"],"email":"tcech@uni-potsdam.de","is_corresponding":false,"name":"Tim Cech"},{"affiliations":["Hasso Plattner Institute, Faculty of Digital Engineering, University of Potsdam, Potsdam, Germany"],"email":"willy.scheibel@hpi.de","is_corresponding":false,"name":"Willy Scheibel"},{"affiliations":["Hasso Plattner Institute, Faculty of Digital Engineering, University of Potsdam, Potsdam, Germany"],"email":"juergen.doellner@hpi.de","is_corresponding":false,"name":"J\u00fcrgen D\u00f6llner"},{"affiliations":["Utrecht University, Utrecht, Netherlands"],"email":"m.behrisch@uu.nl","is_corresponding":false,"name":"Michael Behrisch"},{"affiliations":["Graz University of Technology, Graz, Austria"],"email":"tobias.schreck@cgv.tugraz.at","is_corresponding":false,"name":"Tobias Schreck"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1770","image_caption":"Exemplary comparison of pairs of scatterplots. To analyze the stability concerning input data, we compare pairs of scatterplots that only differ in the amount of jitter applied to the DTM. To analyze the stability concerning hyperparameters, we compare pairs of scatterplots that differ in one hyperparameter setting with consecutive values. 
To analyze stability concerning randomness, we compare two layouts that only differ in their seeds.","keywords":["Text spatializations, text embeddings, topic modeling, dimensionality reductions, stability, benchmarking"],"open_access_supplemental_link":"https://zenodo.org/records/12772899","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.17876","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1770/v-full-1770_Preview.mp4?token=tW2cl0IdREAQh7MB2CO5gIe0FL6l_fBX0wOR_x1bJHI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1770/v-full-1770_Preview.srt?token=X3q6M7zoNPgkA5ue2Lj4m8txAjiTyWvmkq0EiKTfQhY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full9","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Embeddings and Document Spatialization","session_uid":"v-full","session_youtube_ff_id":"T3hvGmZlBgw","session_youtube_ff_link":"https://youtu.be/T3hvGmZlBgw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/H85FqQyR25U&t=0h40m32s","sessions":["Embeddings and Document Spatialization"],"time_stamp":"2024-10-17T13:06:00Z","title":"A Large-Scale Sensitivity Analysis on Latent Embeddings and Dimensionality Reductions for Text Spatializations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243381453","abstract":"Scatterplots provide a visual representation of bivariate data (or 2D embeddings of multivariate data) that allows for effective analyses of data dependencies, clusters, trends, and outliers. Unfortunately, classical scatterplots suffer from scalability issues, since growing data sizes eventually lead to overplotting and visual clutter on a screen with a fixed resolution, which hinders the data analysis process. We propose an algorithm that compensates for irregular sample distributions by a smooth transformation of the scatterplot's visual domain. Our algorithm evaluates the scatterplot's density distribution to compute a regularization mapping based on integral images of the rasterized density function. The mapping preserves the samples' neighborhood relations. Few regularization iterations suffice to achieve a nearly uniform sample distribution that efficiently uses the available screen space. We further propose approaches to visually convey the transformation that was applied to the scatterplot and compare them in a user study. 
We present a novel parallel algorithm for fast GPU-based integral-image computation, which allows for integrating our de-cluttering approach into interactive visual data analysis systems.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Hennes Rave"},{"affiliations":"","email":"","is_corresponding":false,"name":"Vladimir Molchanov"},{"affiliations":"","email":"","is_corresponding":false,"name":"Lars Linsen"}],"award":"","doi":"10.1109/TVCG.2024.3381453","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243381453","image_caption":"UMAP embedding of the MNIST dataset with color-coded classes after four iterations of our algorithm (top left), with grid lines (top right), with density background texture (bottom left), and with contour lines (bottom right). ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.06513","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243381453/v-tvcg-20243381453_Preview.mp4?token=NVVkX__ZVN0UirCznvSvnrnTguAsZpcIu8o5MqWIO6Q&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full9","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Embeddings and Document Spatialization","session_uid":"v-tvcg","session_youtube_ff_id":"U4x_-kWR6sw","session_youtube_ff_link":"https://youtu.be/U4x_-kWR6sw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/H85FqQyR25U&t=1h5m9s","sessions":["Embeddings and Document Spatialization"],"time_stamp":"2024-10-17T13:30:00Z","title":"De-cluttering Scatterplots with Integral Images","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1056","abstract":"We present FCNR, a fast compressive neural representation for tens of thousands of visualization images under varying viewpoints and timesteps. The existing NeRVI solution, albeit enjoying a high compression ratio, incurs slow speeds in encoding and decoding. Built on the recent advances in stereo image compression, FCNR assimilates stereo context modules and joint context transfer modules to compress image pairs. Our solution significantly improves encoding and decoding speed while maintaining high reconstruction quality and satisfying compression ratio. To demonstrate its effectiveness, we compare FCNR with state-of-the-art neural compression methods, including E-NeRV, HNeRV, NeRVI, and ECSIC. 
The source code can be found at https://github.com/YunfeiLu0112/FCNR.","accessible_pdf":true,"authors":[{"affiliations":["University of Notre Dame, Notre Dame, United States"],"email":"ylu25@nd.edu","is_corresponding":true,"name":"Yunfei Lu"},{"affiliations":["University of Notre Dame, Notre Dame, United States"],"email":"pgu@nd.edu","is_corresponding":false,"name":"Pengfei Gu"},{"affiliations":["University of Notre Dame, Notre Dame, United States"],"email":"chaoli.wang@nd.edu","is_corresponding":false,"name":"Chaoli Wang"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1056","image_caption":"FCNR is a fast method for compressing a great number of visualization images. It stands out in both encoding and decoding speed, and leads to compressive results while maintains high reconstruction quality using neural representations.","keywords":["Machine Learning Techniques, Image and Video Data"],"open_access_supplemental_link":"https://github.com/YunfeiLu0112/FCNR","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2407.16369","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1056/v-short-1056_Preview.mp4?token=_udEvR_-XWuGx7m7Uplz74h9eP8X9f7fwPDsTcHXXL8&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short1","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: System design","session_uid":"v-short","session_youtube_ff_id":"dJGQMkPi44U","session_youtube_ff_link":"https://youtu.be/dJGQMkPi44U","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/5O_zfoG4xOo&t=0h39m22s","sessions":["Short Papers: System design"],"time_stamp":"2024-10-16T18:21:00Z","title":"FCNR: Fast Compressive Neural Representation of Visualization Images","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1097","abstract":"Visualization tools now commonly present automated insights highlighting salient data patterns, including correlations, distributions, outliers, and differences, among others. While these insights are valuable for data exploration and chart interpretation, users currently only have a binary choice of accepting or rejecting them, lacking the flexibility to refine the system logic or customize the insight generation process. To address this limitation, we present Groot, a prototype system that allows users to proactively specify and refine automated data insights. The system allows users to directly manipulate chart elements to receive insight recommendations based on their selections. Additionally, Groot provides users with a manual editing interface to customize, reconfigure, or add new insights to individual charts and propagate them to future explorations. We describe a usage scenario to illustrate how these features collectively support insight editing and configuration and discuss opportunities for future work, including incorporating Large Language Models (LLMs), improving semantic data and visualization search, and supporting insight management. 
","accessible_pdf":true,"authors":[{"affiliations":["University of Maryland, College Park, College Park, United States","Tableau Research, Seattle, United States"],"email":"sgathani@cs.umd.edu","is_corresponding":true,"name":"Sneha Gathani"},{"affiliations":["Tableau Research, Seattle, United States"],"email":"amcrisan@uwaterloo.ca","is_corresponding":false,"name":"Anamaria Crisan"},{"affiliations":["Tableau Research, Palo Alto, United States"],"email":"vsetlur@tableau.com","is_corresponding":false,"name":"Vidya Setlur"},{"affiliations":["Tableau Research, Seattle, United States"],"email":"arjun.srinivasan.10@gmail.com","is_corresponding":false,"name":"Arjun Srinivasan"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1097","image_caption":"GROOT allows users to edit and reconfigure automated data insights by (1) selecting marks in charts to get recommendations of new insights based on the selection, (2) reconfiguring default insights by adjusting the template or insight generation thresholds, (3) adding new custom insights by specifying text templates for insights.","keywords":["Automated data insights, insight reconfiguration, natural language templates"],"open_access_supplemental_link":"https://drive.google.com/file/d/1ZTZsN2YbQDdWGiyhVp9SLaE1q7wF6p4r/view?usp=sharing","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1097/v-short-1097_Preview.mp4?token=H27dKmOsBJYdAxm_VtiymNi6EZJv0ednyRKeUBzdvrQ&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1097/v-short-1097_Preview.srt?token=csXPj69gXJiEfmouLtJnaftwINmIPmGJRGcO3eJ6j34&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short1","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: System design","session_uid":"v-short","session_youtube_ff_id":"pqb9IsoJKWA","session_youtube_ff_link":"https://youtu.be/pqb9IsoJKWA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/5O_zfoG4xOo&t=0h47m31s","sessions":["Short Papers: System design"],"time_stamp":"2024-10-16T18:30:00Z","title":"Groot: A System for Editing and Configuring Automated Data Insights","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1130","abstract":"Visualization, from simple line plots to complex high-dimensional visual analysis systems, has established itself throughout numerous domains to explore, analyze, and evaluate data. Applying such visualizations in the context of simulation science where High-Performance Computing (HPC) produces ever-growing amounts of data that is more complex, potentially multidimensional, and multi-modal, takes up resources and a high level of technological experience often not available to domain experts. In this work, we present DaVE - a curated database of visualization examples, which aims to provide state-of-the-art and advanced visualization methods that arise in the context of HPC applications. 
Based on domain- or data-specific descriptors entered by the user, DaVE provides a list of appropriate visualization techniques, each accompanied by descriptions, examples, references, and resources. Sample code, adaptable container templates, and recipes for easy integration in HPC applications can be downloaded for easy access to high-fidelity visualizations. While the database is currently filled with a limited number of entries based on a broad evaluation of needs and challenges of current HPC users, DaVE is designed to be easily extended by experts from both the visualization and HPC communities.","accessible_pdf":true,"authors":[{"affiliations":["RWTH Aachen University, Aachen, Germany"],"email":"koenen@informatik.rwth-aachen.de","is_corresponding":false,"name":"Jens Koenen"},{"affiliations":["RPTU Kaiserslautern-Landau, Kaiserslautern, Germany"],"email":"m.petersen@rptu.de","is_corresponding":false,"name":"Marvin Petersen"},{"affiliations":["RPTU Kaiserslautern-Landau, Kaiserslautern, Germany"],"email":"garth@rptu.de","is_corresponding":false,"name":"Christoph Garth"},{"affiliations":["RWTH Aachen University, Aachen, Germany"],"email":"gerrits@vis.rwth-aachen.de","is_corresponding":true,"name":"Tim Gerrits"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1130","image_caption":"Through a modern web interface, DaVE provides access to an extensible database of visualization examples that demonstrate advanced and state-of-the-art visualization methods. Each example comes with descriptions, references and containerized code for an easy deployment on various hardware configurations, ranging from laptops to complex HPC systems.","keywords":["Visualization, Curated Database, High-Performance Computing"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.03188","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1130/v-short-1130_Preview.mp4?token=K6YauQ7vkIi9XG-JJ7A03jGw53b0BhzhLRRnq9T5mlE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1130/v-short-1130_Preview.srt?token=2m7VfFM6D-3xC9d5AH0hh4IeowanC7WC9xkxLSNXe3U&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short1","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: System design","session_uid":"v-short","session_youtube_ff_id":"HNepHn1OyEM","session_youtube_ff_link":"https://youtu.be/HNepHn1OyEM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/5O_zfoG4xOo&t=0h0m39s","sessions":["Short Papers: System design"],"time_stamp":"2024-10-16T17:45:00Z","title":"DaVE - A Curated Database of Visualization Examples","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1146","abstract":"Millions of runners rely on smart watches that display running-related metrics such as pace, heart rate and distance for training and racing\u2014mostly with text and numbers. Although research tells us that visualizations are a good alternative to text on smart watches, we know little about how visualizations can help in realistic running scenarios. 
We conducted a study in which 20 runners completed running-related tasks on an outdoor track using both text and visualizations. Our results show that runners are 1.5 to 8 times faster in completing those tasks with visualizations than with text, prefer visualizations to text, and would use such visualizations while running \u2014 if available on their smart watch.","accessible_pdf":false,"authors":[{"affiliations":["University of Victoria, Victoria, Canada"],"email":"sarinaksj@uvic.ca","is_corresponding":false,"name":"Sarina Kashanj"},{"affiliations":["University of Victoria, Victoira, Canada","Delft University of Technology, Delft, Netherlands"],"email":"xiyao.wang23@gmail.com","is_corresponding":false,"name":"Xiyao Wang"},{"affiliations":["University of Victoria, Victoria, Canada"],"email":"cperin@uvic.ca","is_corresponding":true,"name":"Charles Perin"}],"award":"honorable","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1146","image_caption":"The two Data Page layouts we used to study the effectiveness of visualization for running. The data pages show Elapsed Time (left), Pace (top), Distance (right) and Heart Rate (bottom). Pace, Distance and Heart Rate are represented either with TEXT or with VISUALIZATION. The data page on the left shows Elapsed Time and Heart Rate with TEXT, and Pace and Distance with VISUALIZATION; the data page on the right shows Elapsed Time, Pace and Distance with TEXT, and Heart Rate with VISUALIZATION.","keywords":["Running, Visualization, Smartwatch visualization."],"open_access_supplemental_link":"https://osf.io/q7ha9/?view_only=cd042df71d6a40239ee8472b505facf0","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://osf.io/preprints/osf/2fa56","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1146/v-short-1146_Preview.mp4?token=NSucXA2-ztbAqr7mjwjaC7HStkykUljMQ-TmRjbaV9w&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short1","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: System design","session_uid":"v-short","session_youtube_ff_id":"QmWZ3rzzz60","session_youtube_ff_link":"https://youtu.be/QmWZ3rzzz60","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/5O_zfoG4xOo&t=0h57m17s","sessions":["Short Papers: System design"],"time_stamp":"2024-10-16T18:39:00Z","title":"Visualizations on Smart Watches while Running: It Actually Helps!","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1159","abstract":"With two studies, we assess how different walking trajectories (straight line, circular, and infinity) and speeds (2 km/h, 4 km/h, and 6 km/h) influence the accuracy and response time of participants reading micro visualizations on a smartwatch. We showed our participants common watch face micro visualizations including date, time, weather information, and four complications showing progress charts of fitness data. 
Our findings suggest that while walking trajectories did not significantly affect reading performance, overall walking activity, especially at high speeds, hurt reading accuracy and, to some extent, response time.","accessible_pdf":false,"authors":[{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"fairouz.grioui@vis.uni-stuttgart.de","is_corresponding":true,"name":"Fairouz Grioui"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"research@blascheck.eu","is_corresponding":false,"name":"Tanja Blascheck"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"yaolijie0219@gmail.com","is_corresponding":false,"name":"Lijie Yao"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"petra.isenberg@inria.fr","is_corresponding":false,"name":"Petra Isenberg"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1159","image_caption":"The watch-face stimulus on top of the teaser image shows an example of the three radial charts of fitness data: calories burned, step count, and distance walked, that we asked participants to compare and estimate the percentage of progress. Below, the figure shows three illustrations of the three walking trajectories: Line, Circular, and Infinity-like and the three walking speeds: 2km/h, 4km/h, and 6km/h that participants performed while reading the visualizations on a smartwatch.","keywords":["micro and mobile visualization, smartwatch"],"open_access_supplemental_link":"https://osf.io/u78s6/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2407.17893","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1159/v-short-1159_Preview.mp4?token=aJJ1dnPeLmz0Y3QeplQHI53PXlaUJBukFSskvIyX1iQ&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short1","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: System design","session_uid":"v-short","session_youtube_ff_id":"2DE5LfUsIWA","session_youtube_ff_link":"https://youtu.be/2DE5LfUsIWA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/5O_zfoG4xOo&t=1h5m31s","sessions":["Short Papers: System design"],"time_stamp":"2024-10-16T18:48:00Z","title":"Micro Visualizations on a Smartwatch: Assessing Reading Performance While Walking","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1161","abstract":"Digital twins are an excellent tool to model, visualize, and simulate complex systems, to understand and optimize their operation. In this work, we present the technical challenges of real-time visualization of a digital twin of the Frontier supercomputer. We show the initial prototype and current state of the twin and highlight technical design challenges of visualizing such a large High Performance Computing (HPC) system. The goal is to understand the use of augmented reality as a primary way to extract information and collaborate on digital twins of complex systems. 
This leverages the spatio-temporal aspect of a 3D representation of a digital twin, with the ability to view historical and real-time telemetry, triggering simulations of a system state and viewing the results, which can be augmented via dashboards for details. Finally, we discuss considerations and opportunities for augmented reality of digital twins of large-scale, parallel computers.","accessible_pdf":false,"authors":[{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"maiterthm@ornl.gov","is_corresponding":true,"name":"Matthias Maiterth"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"brewerwh@ornl.gov","is_corresponding":false,"name":"Wes Brewer"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"dewetd@ornl.gov","is_corresponding":false,"name":"Dane De Wet"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"greenwoodms@ornl.gov","is_corresponding":false,"name":"Scott Greenwood"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"kumarv@ornl.gov","is_corresponding":false,"name":"Vineet Kumar"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"hinesjr@ornl.gov","is_corresponding":false,"name":"Jesse Hines"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"bouknightsl@ornl.gov","is_corresponding":false,"name":"Sedrick L Bouknight"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"wangz@ornl.gov","is_corresponding":false,"name":"Zhe Wang"},{"affiliations":["Hewlett Packard Enterprise, Berkshire, United Kingdom"],"email":"tim.dykes@hpe.com","is_corresponding":false,"name":"Tim Dykes"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"fwang2@ornl.gov","is_corresponding":false,"name":"Feiyi Wang"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1161","image_caption":"Two people standing around a desk, pointing at an augmented reality digital twin of the frontier supercomputer with central energy plant. 
","keywords":["Digital Twin, Data Center, Information Representation, Massively Parallel Systems, Operational Data Analytics, Simulation, Augmented Reality"],"open_access_supplemental_link":"https://code.ornl.gov/exadigit/exadigitue5","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1161/v-short-1161_Preview.mp4?token=ET8CQWbPG-hXwXjUua6NoE0PVGz1MjAU-g1pHkZ33IA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1161/v-short-1161_Preview.srt?token=3zqzNeSMX9qgzyGGV40rq8NrGlal2SYk044O1_DS1fg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short1","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: System design","session_uid":"v-short","session_youtube_ff_id":"bumoRDi4LsE","session_youtube_ff_link":"https://youtu.be/bumoRDi4LsE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/5O_zfoG4xOo&t=0h19m41s","sessions":["Short Papers: System design"],"time_stamp":"2024-10-16T18:03:00Z","title":"Visualizing an Exascale Data Center Digital Twin: Considerations, Challenges and Opportunities","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1166","abstract":"Custom animated visualizations of large, complex datasets are helpful across many domains, but they are hard to develop. Much of the difficulty arises from maintaining visualization state across many animated graphical elements that may change in number over time. We contribute Counterpoint, a framework for state management designed to help implement such visualizations in JavaScript. Using Counterpoint, developers can manipulate large collections of marks with reactive attributes that are easy to render in scalable APIs such as Canvas and WebGL. Counterpoint also helps orchestrate the entry and exit of graphical elements using the concept of a rendering \"stage.\" Through a performance evaluation, we show that Counterpoint adds minimal overhead over current high-performance rendering techniques while simplifying implementation. We provide two examples of visualizations created using Counterpoint that illustrate its flexibility and compatibility with other visualization toolkits as well as considerations for users with disabilities. 
Counterpoint is open-source and available at https://github.com/cmudig/counterpoint.","accessible_pdf":true,"authors":[{"affiliations":["Carnegie Mellon University, Pittsburgh, United States"],"email":"vsivaram@andrew.cmu.edu","is_corresponding":true,"name":"Venkatesh Sivaraman"},{"affiliations":["Carnegie Mellon University, Pittsburgh, United States"],"email":"fje@cmu.edu","is_corresponding":false,"name":"Frank Elavsky"},{"affiliations":["Carnegie Mellon University, Pittsburgh, United States"],"email":"domoritz@cmu.edu","is_corresponding":false,"name":"Dominik Moritz"},{"affiliations":["Carnegie Mellon University, Pittsburgh, United States"],"email":"adamperer@cmu.edu","is_corresponding":false,"name":"Adam Perer"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1166","image_caption":"Counterpoint is an open-source TypeScript framework that makes it easier to create animated visualizations, such as the ones shown here, using high-performance Web graphics frameworks like Canvas and WebGL.","keywords":["Visualization Toolkits, Animation, Web Interfaces, Software System Structures"],"open_access_supplemental_link":"https://dig.cmu.edu/counterpoint","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1166/v-short-1166_Preview.mp4?token=HGqEhRrBoE95UDds4tEq-HzpElLW8IjdT04FWh_y_-w&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1166/v-short-1166_Preview.srt?token=dkB9rLeqmfOuUxrGJwnjXMnK5w6U7Oo_UebdTcbLPfI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short1","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: System design","session_uid":"v-short","session_youtube_ff_id":"4zOVLaUf7po","session_youtube_ff_link":"https://youtu.be/4zOVLaUf7po","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/5O_zfoG4xOo&t=0h10m3s","sessions":["Short Papers: System design"],"time_stamp":"2024-10-16T17:54:00Z","title":"Counterpoint: Orchestrating Large-Scale Custom Animated Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1248","abstract":"Statistical practices such as building regression models or running hypothesis tests rely on following rigorous procedures of steps and verifying assumptions on data to produce valid results. However, common statistical tools do not verify users\u2019 decision choices and provide low-level statistical functions without instructions on the whole analysis practice. Users can easily misuse analysis methods, potentially decreasing the validity of results. To address this problem, we introduce GuidedStats, an interactive interface within computational notebooks that encapsulates guidance, models, visualization, and exportable results into interactive workflows. It breaks down typical analysis processes, such as linear regression and two-sample T-tests, into interactive steps supplemented with automatic visualizations and explanations for step-wise evaluation. 
Users can iterate on input choices to refine their models, while recommended actions and exports allow the user to continue their analysis in code. Case studies show how GuidedStats offers valuable instructions for conducting fluid statistical analyses while finding possible assumption violations in the underlying data, supporting flexible and accurate statistical analyses.","accessible_pdf":true,"authors":[{"affiliations":["New York University, New York, United States"],"email":"yz9381@nyu.edu","is_corresponding":true,"name":"Yuqi Zhang"},{"affiliations":["Carnegie Mellon University, Pittsburgh, United States"],"email":"adamperer@cmu.edu","is_corresponding":false,"name":"Adam Perer"},{"affiliations":["Carnegie Mellon University, Pittsburgh, United States"],"email":"willepp@cmu.edu","is_corresponding":false,"name":"Will Epperson"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1248","image_caption":"GuidedStats assists users with statistical analyses through guided workflows. It automatically verifies assumptions and provides actionable suggestions. At the current step, the user is checking assumptions, with the explanation offering more details about the relevant statistical concepts.","keywords":["Data science tools, computational notebooks, analytical guidance"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1248/v-short-1248_Preview.mp4?token=DOEPbtaqFuDv1a5br6vnEiqyvBoppG_qx-z7anTi-Xc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1248/v-short-1248_Preview.srt?token=j0uSYby6HvG32XaP1PEpcjXJYshNz4jbHNAS94B7eZY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short1","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: System design","session_uid":"v-short","session_youtube_ff_id":"kEa12neWJfQ","session_youtube_ff_link":"https://youtu.be/kEa12neWJfQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/5O_zfoG4xOo&t=0h29m27s","sessions":["Short Papers: System design"],"time_stamp":"2024-10-16T18:12:00Z","title":"Guided Statistical Workflows with Interactive Explanations and Assumption Checking","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1040","abstract":"From dirty data to intentional deception, there are many threats to the validity of data-driven decisions. Making use of data, especially new or unfamiliar data, therefore requires a degree of trust or verification. How is this trust established? In this paper, we present the results of a series of interviews with both producers and consumers of data artifacts (outputs of data ecosystems like spreadsheets, charts, and dashboards) aimed at understanding strategies and obstacles to building trust in data. We find a recurring need, but lack of existing standards, for data validation and verification, especially among data consumers. 
We therefore propose a set of data guards: methods and tools for fostering trust in data artifacts.","accessible_pdf":true,"authors":[{"affiliations":["Tableau Research, Seattle, United States"],"email":"nicole.sultanum@gmail.com","is_corresponding":false,"name":"Nicole Sultanum"},{"affiliations":["Tableau Research, Seattle, United States"],"email":"bromley.denny@gmail.com","is_corresponding":false,"name":"Dennis Bromley"},{"affiliations":["Northeastern University, Portland, United States"],"email":"m.correll@northeastern.edu","is_corresponding":false,"name":"Michael Correll"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1040","image_caption":"Data-driven decision making is ostensibly more common now than ever, but without specific points of trust in the data handling process, people often fall back on ad hoc decision justification mechanisms. Driven by user interviews of both data producers and data consumers, Data Guards is a set of seven proposed strategies for improving users' trust in data to help them make more confident data-driven decisions.","keywords":["Data visualization, data cleaning, data quality, trust"],"open_access_supplemental_link":"https://osf.io/ynm57/?view_only=572a886b5b154c8298c8b66ba170c632","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2407.14042","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1040/v-short-1040_Preview.mp4?token=I0DHvU5-20n2Fp6mZnDJ99636Y0rfqpqfaHfF2OYrcE&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Analytics and Applications","session_uid":"v-short","session_youtube_ff_id":"lGC-JrOjFTo","session_youtube_ff_link":"https://youtu.be/lGC-JrOjFTo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/O3wdZMpMs-I&t=0h10m45s","sessions":["Short Papers: Analytics and Applications"],"time_stamp":"2024-10-17T16:09:00Z","title":"Data Guards: Challenges and Solutions for Fostering Trust in Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1114","abstract":"As visualization literacy and its implications gain prominence, we need effective methods to prepare students for the variety of visualizations in an increasingly data-driven world. Recently, the potential of comics has been recognized in various data visualization contexts, including educational settings. We describe the development of a workshop in which we use our ``comic construction kit'' as a tool for students to understand various data visualization techniques through an interactive creative approach of creating explanatory comics. We report on our insights from holding eight workshops with high school students and teachers, university students, and lecturers, aiming to enhance the landscape of hands-on visualization activities that can enrich the visualization classroom. The comic construction kit and all supplemental materials are open source under a CC-BY license and available at https://fhstp.github.io/comixplain/vis4schools.html.","accessible_pdf":true,"authors":[{"affiliations":["St. 
P\u00f6lten University of Applied Sciences, St. P\u00f6lten, Austria"],"email":"magdalena.boucher@fhstp.ac.at","is_corresponding":true,"name":"Magdalena Boucher"},{"affiliations":["St. Poelten University of Applied Sciences, St. Poelten, Austria"],"email":"christina.stoiber@fhstp.ac.at","is_corresponding":false,"name":"Christina Stoiber"},{"affiliations":["School of Informatics, Communications and Media, Hagenberg im M\u00fchlkreis, Austria"],"email":"mandy.keck@fh-hagenberg.at","is_corresponding":false,"name":"Mandy Keck"},{"affiliations":["St. Poelten University of Applied Sciences, St. Poelten, Austria"],"email":"victor.oliveira@fhstp.ac.at","is_corresponding":false,"name":"Victor Adriel de Jesus Oliveira"},{"affiliations":["St. Poelten University of Applied Sciences, St. Poelten, Austria"],"email":"wolfgang.aigner@fhstp.ac.at","is_corresponding":false,"name":"Wolfgang Aigner"}],"award":"honorable","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1114","image_caption":"A preview of some customizeable character stickers and pre-printed visualizations from our comic construction kit, with a comic example by a student.","keywords":["data comics, storytelling, visualization education, visualization literacy, visualization activities"],"open_access_supplemental_link":"https://fhstp.github.io/comixplain/vis4schools.html","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://phaidra.fhstp.ac.at/o:5588","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Analytics and Applications","session_uid":"v-short","session_youtube_ff_id":"CopQJYd6mh0","session_youtube_ff_link":"https://youtu.be/CopQJYd6mh0","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/O3wdZMpMs-I&t=1h7m6s","sessions":["Short Papers: Analytics and Applications"],"time_stamp":"2024-10-17T17:03:00Z","title":"The Comic Construction Kit: An Activity for Students to Learn and Explain Data Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1117","abstract":"Geovisualizations are powerful tools for exploratory spatial analysis, enabling sighted users to discern patterns, trends, and relationships within geographic data. However, these visual tools have remained largely inaccessible to screen-reader users. We introduce AltGeoViz, a new interactive geovisualization approach that dynamically generates alt-text descriptions based on the user's current map view, providing voiceover summaries of spatial patterns and descriptive statistics.In a remote user study with five screen-reader users, we found that participants were able to interact with spatial data in previously infeasible ways, demonstrated a clear understanding of data summaries and their location context, and could synthesize spatial understandings of their explorations. 
Moreover, we identified key areas for improvement, such as the addition of spatial navigation controls and comparative analysis features.","accessible_pdf":true,"authors":[{"affiliations":["University of Washington, Seattle, United States"],"email":"chuchuli@cs.washington.edu","is_corresponding":true,"name":"Chu Li"},{"affiliations":["University of Washington, Seattle, United States"],"email":"ypang2@cs.washington.edu","is_corresponding":false,"name":"Rock Yuren Pang"},{"affiliations":["University of Washington, Seattle, United States"],"email":"asharif@cs.washington.edu","is_corresponding":false,"name":"Ather Sharif"},{"affiliations":["University of Washington, Seattle, United States"],"email":"chheda@cs.washington.edu","is_corresponding":false,"name":"Arnavi Chheda-Kothary"},{"affiliations":["University of Washington, Seattle, United States"],"email":"jheer@uw.edu","is_corresponding":false,"name":"Jeffrey Heer"},{"affiliations":["University of Washington, Seattle, United States"],"email":"jonf@cs.uw.edu","is_corresponding":false,"name":"Jon E. Froehlich"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1117","image_caption":"AltGeoViz enables screen-reader users to interact with dynamic geovisualizations. The left image shows the initial view, with the title, a summary of the general spatial pattern, and data extrema and averages presented to the user.The center image shows how as the user moves and zooms, the information is updated, and they can hear the boundary of their current viewport. The right image demonstrates how the data can be shown at different geographic units, such as state or county level, depending on the zoom level. See the provided video for a full demonstration of the AltGeoViz functionality. ","keywords":["dynamic geovisualization, accessibility, alt-text, screen-reader"],"open_access_supplemental_link":"https://github.com/makeabilitylab/altgeoviz","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2406.13853","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1117/v-short-1117_Preview.mp4?token=eBWMER4GnSZFHZaWimXzfNCKGTrVOVyHY6Wo_kRg5yI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1117/v-short-1117_Preview.srt?token=SrKdWP8Up_PgHYSD-3fOBsOlspY3U6SURAVdsDl8oZA&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Analytics and Applications","session_uid":"v-short","session_youtube_ff_id":"S6K-w6Kn090","session_youtube_ff_link":"https://youtu.be/S6K-w6Kn090","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/O3wdZMpMs-I&t=0h20m0s","sessions":["Short Papers: Analytics and Applications"],"time_stamp":"2024-10-17T16:18:00Z","title":"AltGeoViz: Facilitating Accessible Geovisualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1126","abstract":"Psychological research often involves understanding psychological constructs through conducting factor analysis on data collected by a questionnaire, which can comprise hundreds of questions. 
Without interactive systems for interpreting factor models, researchers are frequently exposed to subjectivity, potentially leading to misinterpretations or overlooked crucial information. This paper introduces FAVis, a novel interactive visualization tool designed to aid researchers in interpreting and evaluating factor analysis results. FAVis enhances the understanding of relationships between variables and factors by supporting multiple views for visualizing factor loadings and correlations, allowing users to analyze information from various perspectives. The primary feature of FAVis is to enable users to set optimal thresholds for factor loadings to balance clarity and information retention. FAVis also allows users to assign tags to variables, enhancing the understanding of factors by linking them to their associated psychological constructs. Our user study demonstrates the utility of FAVis in various tasks.","accessible_pdf":true,"authors":[{"affiliations":["University of Notre Dame, Notre Dame, United States","University of Notre Dame, Notre Dame, United States"],"email":"ylu22@nd.edu","is_corresponding":true,"name":"Yikai Lu"},{"affiliations":["University of Notre Dame, Notre Dame, United States"],"email":"chaoli.wang@nd.edu","is_corresponding":false,"name":"Chaoli Wang"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1126","image_caption":"We propose FAVis (https://luyikei.github.io/favis/). (A) Matrix view shows a factor loadings matrix; (B) Network view visualizes cross-loadings most effectively; (C) Parallel-coordinates view shows factor loadings for each variable/factor allows for selecting variables/factors within a range; (D) Tag view shows the relevance of tags for each factor by counting tags annotated for variables based on a theory; (E) Word cloud view helps interpret factors by correlating fonts with the values of factor loadings; (F) Threshold view controls the number of factor loadings shown in different views; (G) Factor correlation view shows the network of factor correlations; (H) Top bar for filtering.","keywords":["Machine Learning, Statistics, Modelling, and Simulation Applications, Coordinated and Multiple Views, High-dimensional Data"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2407.14072","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1126/v-short-1126_Preview.mp4?token=Py6oaIPs8w8gzgtsu2sfDDyy4zLBsY5bFKVNS7lGboM&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1126/v-short-1126_Preview.srt?token=MVgNa4kGGix3ZpPG_yBLmTLC9Y_QweqCRI1O2NW2RNc&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Analytics and Applications","session_uid":"v-short","session_youtube_ff_id":"USpKXLjKe1A","session_youtube_ff_link":"https://youtu.be/USpKXLjKe1A","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/O3wdZMpMs-I&t=0h1m0s","sessions":["Short Papers: Analytics and Applications"],"time_stamp":"2024-10-17T16:00:00Z","title":"FAVis: Visual Analytics of 
Factor Analysis for Psychological Research","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1185","abstract":"The visualization and interactive exploration of geo-referenced networks poses challenges if the network's nodes are not evenly distributed. Our approach proposes new ways of realizing animated transitions for exploring such networks from an ego-perspective. We aim to reduce the required screen estate while maintaining the viewers' mental map of distances and directions. A preliminary study provides first insights of the comprehensiveness of animated geographic transitions regarding directional relationships between start and end point in different projections. Two use cases showcase how ego-perspective graph exploration can be supported using less screen space than previous approaches.","accessible_pdf":false,"authors":[{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"max@mumintroll.org","is_corresponding":true,"name":"Max Franke"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"samuel.beck@vis.uni-stuttgart.de","is_corresponding":false,"name":"Samuel Beck"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"steffen.koch@vis.uni-stuttgart.de","is_corresponding":false,"name":"Steffen Koch"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1185","image_caption":"Our approach supports the exploration of relations in geo-referenced networks with animated zoom-and-pan transitions. The figure shows such a transition realized as a two-point equidistant projection. The geodetic line (blue arrow) between the start and end node is projected without distortion. Example views during the animated transition are shown to the left and right of the map. 
Their respective coverage is indicated by red circles.","keywords":["Geographical projection, geo-referenced graph, degree-of-interest function, ego-perspective exploration."],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1185/v-short-1185_Preview.mp4?token=nAu7NU25mRuUoQFmO5uEegIKXfgjvuDrFomvfMeej-g&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1185/v-short-1185_Preview.srt?token=Gc4t4Ptp3CFSPRpeiFS6pC0a-gqH_JjwfT0S8OlSI8s&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Analytics and Applications","session_uid":"v-short","session_youtube_ff_id":"dn3WTXLOdUE","session_youtube_ff_link":"https://youtu.be/dn3WTXLOdUE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/O3wdZMpMs-I&t=0h48m48s","sessions":["Short Papers: Analytics and Applications"],"time_stamp":"2024-10-17T16:45:00Z","title":"Two-point Equidistant Projection and Degree-of-interest Filtering for Smooth Exploration of Geo-referenced Networks","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1191","abstract":"To enable data-driven decision-making across organizations, data professionals need to share insights with their colleagues in context-appropriate communication channels. Many of their colleagues rely on data but are not themselves analysts; furthermore, their colleagues are reluctant or unable to use dedicated analytical applications or dashboards, and they expect communication to take place within threaded collaboration platforms such as Slack or Microsoft Teams. In this paper, we introduce a set of six strategies for adapting content from business intelligence (BI) dashboards into appropriate formats for sharing on collaboration platforms, formats that we refer to as dashboard snapshots. Informed by prior studies of enterprise communication around data, these strategies go beyond redesigning or restyling by considering varying levels of data literacy across an organization, introducing affordances for self-service question-answering, and anticipating the post-sharing lifecycle of data artifacts. These strategies involve the use of templates that are matched to common communicative intents, serving to reduce the workload of data professionals. We contribute a formal representation of these strategies and demonstrate their applicability in a comprehensive enterprise communication scenario featuring multiple stakeholders that unfolds over the span of months. 
","accessible_pdf":true,"authors":[{"affiliations":["Northwestern University, Evanston, United States"],"email":"hyeokkim2024@u.northwestern.edu","is_corresponding":true,"name":"Hyeok Kim"},{"affiliations":["Tableau Research, Seattle, United States"],"email":"arjun.srinivasan.10@gmail.com","is_corresponding":false,"name":"Arjun Srinivasan"},{"affiliations":["Tableau Research, Seattle, United States"],"email":"mbrehmer@uwaterloo.ca","is_corresponding":false,"name":"Matthew Brehmer"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1191","image_caption":"A pipeline for making selections from a dashboard, retargeting them as components, combining the components into a dashboard snapshot, sharing and updating the snapshot on a collaboration platform.","keywords":["Collaboration visualization, visualization retargeting, responsive visualization design, business intelligence"],"open_access_supplemental_link":"https://dashboard-snapshot.github.io","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.00242","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1191/v-short-1191_Preview.mp4?token=hdtHw2ttp4zyinx0e-hcDqPYjbBGrd__GppnixInt8Q&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1191/v-short-1191_Preview.srt?token=Hi010g9WmvNn2xupgck3Jp3ivcfflWCT5Puv7-WviZY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Analytics and Applications","session_uid":"v-short","session_youtube_ff_id":"SLBqiNRU_NY","session_youtube_ff_link":"https://youtu.be/SLBqiNRU_NY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/O3wdZMpMs-I&t=0h58m35s","sessions":["Short Papers: Analytics and Applications"],"time_stamp":"2024-10-17T16:54:00Z","title":"Bringing Data into the Conversation: Adapting Content from Business Intelligence Dashboards for Threaded Collaboration Platforms","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1264","abstract":"The Local Moran's I statistic is a valuable tool for identifying localized patterns of spatial autocorrelation. Understanding these patterns is crucial in spatial analysis, but interpreting the statistic can be difficult. To simplify this process, we introduce three novel visualizations that enhance the interpretation of Local Moran's I results. These visualizations can be interactively linked to one another, and to established visualizations, to offer a more holistic exploration of the results. We provide a JavaScript library with implementations of these new visual elements, along with a web dashboard that demonstrates their integrated use. 
","accessible_pdf":false,"authors":[{"affiliations":["NIH, Rockville, United States","Queen's University, Belfast, United Kingdom"],"email":"masonlk@nih.gov","is_corresponding":true,"name":"Lee Mason"},{"affiliations":["Queen's University Belfast , Belfast , United Kingdom"],"email":"b.hicks@qub.ac.uk","is_corresponding":false,"name":"Bl\u00e1naid Hicks"},{"affiliations":["National Institutes of Health, Rockville, United States"],"email":"jonas.dealmeida@nih.gov","is_corresponding":false,"name":"Jonas S Almeida"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1264","image_caption":"A screenshot of an interactive dashboard featuring the three Local Moran's I plot designs proposed in our paper.","keywords":["Spatial, spatial clustering, spatial autocorrelation, geospatial, GIS, interactive visualization, visual analytics, Moran's I, local indicators of spatial association"],"open_access_supplemental_link":"https://github.com/episphere/moranplot","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.02418","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1264/v-short-1264_Preview.mp4?token=bM9IpDfGMhh8fPX0WeTL_qJUJthGTqfQxwl-D30S9RM&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1264/v-short-1264_Preview.srt?token=h1d3DNeWPE2VfjyPIc-kozlja0tfD_Yik8VdjSr892g&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Analytics and Applications","session_uid":"v-short","session_youtube_ff_id":"E1nVUBZigfY","session_youtube_ff_link":"https://youtu.be/E1nVUBZigfY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/O3wdZMpMs-I&t=0h39m6s","sessions":["Short Papers: Analytics and Applications"],"time_stamp":"2024-10-17T16:36:00Z","title":"Demystifying Spatial Dependence: Interactive Visualizations for Interpreting Local Spatial Autocorrelation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1285","abstract":"This study examines the impacts of public health communications visualizing risk disparities between racial and other social groups. It compares the effects of traditional bar charts to an alternative design emphasizing geographic variability with differing annotations and jitter plots. Whereas both visualization designs increased perceived vulnerability, behavioral intent, and policy support, the geo-emphasized charts were significantly more effective in reducing personal attribution biases. The findings also reveal emotionally taxing experiences for chart viewers from marginalized communities. 
This work suggests a need for strategic reevaluation of visual communication tools in public health to enhance understanding and engagement without reinforcing stereotypes or emotional distress.","accessible_pdf":false,"authors":[{"affiliations":["3iap, Raleigh, United States"],"email":"eli@3iap.com","is_corresponding":true,"name":"Eli Holder"},{"affiliations":["Northeastern University, Boston, United States","University of California Merced, Merced, United States"],"email":"l.padilla@northeastern.edu","is_corresponding":false,"name":"Lace M. Padilla"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1285","image_caption":"Bars and geography-emphasized chart (geo-emph) showing crude mortality rates for heart disease. The geo-emph chart includes the same overall mortality rates but uses annotations and jitter dots of U.S. states to emphasize within-group differences.","keywords":["Health Equity, Public Health Communication"],"open_access_supplemental_link":"https://osf.io/emb8y/","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1285/v-short-1285_Preview.mp4?token=UMeO7m2MzfNLyPgPhuBS9ecNtcw7b_GxGKH3rMU7wNE&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Analytics and Applications","session_uid":"v-short","session_youtube_ff_id":"tUzqEEJJyKw","session_youtube_ff_link":"https://youtu.be/tUzqEEJJyKw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/O3wdZMpMs-I&t=0h30m20s","sessions":["Short Papers: Analytics and Applications"],"time_stamp":"2024-10-17T16:27:00Z","title":"\"Must Be a Tuesday\": Affect, Attribution, and Geographic Variability in Equity-Oriented Visualizations of Population Health Disparities","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1047","abstract":"In the rapidly evolving field of deep learning, traditional methodologies for designing models predominantly rely on code-based frameworks. While these approaches provide flexibility, they create a significant barrier to entry for non-experts and obscure the immediate impact of architectural decisions on model performance. In response to this challenge, recent no-code approaches have been developed with the aim of enabling easy model development through graphical interfaces. However, both traditional and no-code methodologies share a common limitation that the inability to predict model outcomes or identify issues without executing the model. To address this limitation, we introduce an intuitive visual feedback-based no-code approach to visualize and analyze deep learning models during the design phase. This approach utilizes dataflow-based visual programming with dynamic visual encoding of model architecture. A user study was conducted with deep learning developers to demonstrate the effectiveness of our approach in enhancing the model design process, improving model understanding, and facilitating a more intuitive development experience. 
The findings of this study suggest that real-time architectural visualization significantly contributes to more efficient model development and a deeper understanding of model behaviors.","accessible_pdf":true,"authors":[{"affiliations":["VIENCE Inc., Seoul, Korea, Republic of","Korea University, Seoul, Korea, Republic of"],"email":"juny0603@gmail.com","is_corresponding":true,"name":"JunYoung Choi"},{"affiliations":["VIENCE Inc., Seoul, Korea, Republic of"],"email":"wings159@vience.co.kr","is_corresponding":false,"name":"Sohee Park"},{"affiliations":["Korea University, Seoul, Korea, Republic of"],"email":"hellenkoh@gmail.com","is_corresponding":false,"name":"GaYeon Koh"},{"affiliations":["VIENCE Inc., Seoul, Korea, Republic of"],"email":"k0seo0330@vience.co.kr","is_corresponding":false,"name":"Youngseo Kim"},{"affiliations":["VIENCE Inc., Seoul, Korea, Republic of","Korea University, Seoul, Korea, Republic of"],"email":"wkjeong@korea.ac.kr","is_corresponding":false,"name":"Won-Ki Jeong"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1047","image_caption":"An example of proofreading of structural issues in a deep learning model (U-Net) using a proposed visual feedback-based no-code approach, and an example of the conventional method (code-based) corresponding to the errors present in the model.","keywords":["Deep learning, visual programming, explainable AI."],"open_access_supplemental_link":"https://vience.io/vience-canvas/mlops/sample","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1047/v-short-1047_Preview.mp4?token=CMcl7ui0rQ393UyFOML-kB2MqUGiK4UTVazthQ6Lrtw&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1047/v-short-1047_Preview.srt?token=i2_eLL9wzYoVKRhFXnyAoQv3qoo_n0tZ5v9c3rqE-zk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: AI and LLM","session_uid":"v-short","session_youtube_ff_id":"79um-yl_rvU","session_youtube_ff_link":"https://youtu.be/79um-yl_rvU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/L9rtmx-1eyM&t=0h39m55s","sessions":["Short Papers: AI and LLM"],"time_stamp":"2024-10-17T18:21:00Z","title":"Intuitive Design of Deep Learning Models through Visual Feedback","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1058","abstract":"Semantic interaction (SI) in Dimension Reduction (DR) of images allows users to incorporate feedback through direct manipulation of the 2D positions of images. Through interaction, users specify a set of pairwise relationships that the DR should aim to capture. Existing methods for images incorporate feedback into the DR through feature weights on abstract embedding features. However, if the original embedding features do not suitably capture the users\u2019 task then the DR cannot either. We propose ImageSI, an SI method for image DR that incorporates user feedback directly into the image model to update the underlying embeddings, rather than weighting them. 
In doing so, ImageSI ensures that the embeddings suitably capture the features necessary for the task so that the DR can subsequently organize images using those features. We present two variations of ImageSI using different loss functions - ImageSI_MDS\u22121 , which prioritizes the explicit pairwise relationships from the interaction and ImageSI_Triplet, which prioritizes clustering, using the interaction to define groups of images. Finally, we present a usage scenario and a simulation-based evaluation to demonstrate the utility of ImageSI and compare it to current methods.","accessible_pdf":true,"authors":[{"affiliations":["Vriginia Tech, Blacksburg, United States"],"email":"jiayuelin@vt.edu","is_corresponding":false,"name":"Jiayue Lin"},{"affiliations":["Tulane University, New Orleans, United States"],"email":"rfaust1@tulane.edu","is_corresponding":true,"name":"Rebecca Faust"},{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"north@vt.edu","is_corresponding":false,"name":"Chris North"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1058","image_caption":"An example using a collection of images of sharks and snakes. We want the dimension reduction (DR) to organize images based on the feature \"open mouth\" vs \"closed mouth\". (A) shows the initial projection, with added contours to highlight the locations of images with open mouths (yellow) and closed mouths (blue). The DR is not able to identify the open vs closed mouth feature. (B) illustrates the user\u2019s interaction to convey this feature. (C) shows the DR after using ImageSI to update the embeddings. The DR now captures this feature much better than in it did with the original embeddings. ","keywords":["Semantic Interaction, Dimension Reduction"],"open_access_supplemental_link":"https://osf.io/m2wdf/?view_only=3b2f851592874ac791ad0ba5bc809774","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1058/v-short-1058_Preview.mp4?token=DiJFwOd1DL-Y3YZiGI_igjZZaP9lc9UQl3GxZuDqyok&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1058/v-short-1058_Preview.srt?token=U3bxJhzPWCwU3zWDa-ETnp5-cAETvSqcvLtlycpJ9YA&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: AI and LLM","session_uid":"v-short","session_youtube_ff_id":"hPRtueM5Aw4","session_youtube_ff_link":"https://youtu.be/hPRtueM5Aw4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/L9rtmx-1eyM&t=0h0m45s","sessions":["Short Papers: AI and LLM"],"time_stamp":"2024-10-17T17:45:00Z","title":"ImageSI: Semantic Interaction for Deep Learning Image Projections","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1064","abstract":"Large Language Models (LLMs) have demonstrated remarkable versatility in visualization authoring, but often generate suboptimal designs that are invalid or fail to adhere to design guidelines for effective visualization. 
We present Bavisitter, a natural language interface that integrates established visualization design guidelines into LLMs.Based on our survey on the design issues in LLM-generated visualizations, Bavisitter monitors the generated visualizations during a visualization authoring dialogue to detect an issue. When an issue is detected, it intervenes in the dialogue, suggesting possible solutions to the issue by modifying the prompts. We also demonstrate two use cases where Bavisitter detects and resolves design issues from the actual LLM-generated visualizations.","accessible_pdf":false,"authors":[{"affiliations":["Sungkyunkwan University, Suwon, Korea, Republic of"],"email":"jiwnchoi@skku.edu","is_corresponding":true,"name":"Jiwon Choi"},{"affiliations":["Sungkyunkwan University, Suwon, Korea, Republic of"],"email":"dlwodnd00@skku.edu","is_corresponding":false,"name":"Jaeung Lee"},{"affiliations":["Sungkyunkwan University, Suwon, Korea, Republic of"],"email":"jmjo@skku.edu","is_corresponding":false,"name":"Jaemin Jo"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1064","image_caption":"Bavisitter\u2019s visualization authoring workflow. A) The user requests a visualization to an LLM by prompting \u201cShow me the average yield by site.\u201d B) The LLM generates an ineffective visualization design that uses a connection mark to encode the categorical attribute on the x-axis. C) Bavisitter detects the design issue in the generated visualization and gives feedback to the LLM by modifying the original prompt, e.g., appending \u201cChange mark to bar\u201d. As a result, the user can author visualization designs that conform to known design guidelines and knowledge while exploiting the flexibility that the LLM provides. ","keywords":["Automated Visualization, Visualization Tools, Large Language Model."],"open_access_supplemental_link":"https://github.com/jiwnchoi/Bavisitter","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1064/v-short-1064_Preview.mp4?token=5RpdXHt32iGGi7RTjNMIT0xavvRbhHTuPaoWv7omPIo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1064/v-short-1064_Preview.srt?token=276KbpWPG1SauiOrcfVnuu8YnxSc5r4PQ1Z5BVO4iL4&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: AI and LLM","session_uid":"v-short","session_youtube_ff_id":"yRnmq_TZ2FU","session_youtube_ff_link":"https://youtu.be/yRnmq_TZ2FU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/L9rtmx-1eyM&t=0h57m33s","sessions":["Short Papers: AI and LLM"],"time_stamp":"2024-10-17T18:39:00Z","title":"Bavisitter: Integrating Design Guidelines into Large Language Models for Visualization Authoring","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1089","abstract":"In healthcare, AI techniques are widely used for tasks like risk assessment and anomaly detection. 
Despite AI's potential as a valuable assistant, its role in complex medical data analysis often oversimplifies human-AI collaboration dynamics. To address this, we collaborated with a local hospital, engaging six physicians and one data scientist in a formative study. From this collaboration, we propose a framework integrating two-phase interactive visualization systems: one for Human-Led, AI-Assisted Retrospective Analysis and another for AI-Mediated, Human-Reviewed Iterative Modeling. This framework aims to enhance understanding and discussion around effective human-AI collaboration in healthcare. ","accessible_pdf":false,"authors":[{"affiliations":["ShanghaiTech University, Shanghai, China","ShanghaiTech University, Shanghai, China"],"email":"ouyy@shanghaitech.edu.cn","is_corresponding":true,"name":"Yang Ouyang"},{"affiliations":["University of Illinois at Urbana-Champaign, Champaign, United States","University of Illinois at Urbana-Champaign, Champaign, United States"],"email":"chenyang.zhang@gatech.edu","is_corresponding":false,"name":"Chenyang Zhang"},{"affiliations":["ShanghaiTech University, Shanghai, China","ShanghaiTech University, Shanghai, China"],"email":"wanghe1@shanghaitech.edu.cn","is_corresponding":false,"name":"He Wang"},{"affiliations":["Zhongshan Hospital Fudan University, Shanghai, China","Zhongshan Hospital Fudan University, Shanghai, China"],"email":"15301050137@fudan.edu.cn","is_corresponding":false,"name":"Tianle Ma"},{"affiliations":["Zhongshan Hospital Fudan University, Shanghai, China","Zhongshan Hospital Fudan University, Shanghai, China"],"email":"cjiang_fdu@yeah.net","is_corresponding":false,"name":"Chang Jiang"},{"affiliations":["Zhongshan Hospital Fudan University, Shanghai, China","Zhongshan Hospital Fudan University, Shanghai, China"],"email":"522649732@qq.com","is_corresponding":false,"name":"Yuheng Yan"},{"affiliations":["Zhongshan Hospital Fudan University, Shanghai, China","Zhongshan Hospital Fudan University, Shanghai, China"],"email":"yan.zuoqin@zs-hospital.sh.cn","is_corresponding":false,"name":"Zuoqin Yan"},{"affiliations":["Hong Kong University of Science and Technology, Hong Kong, Hong Kong","Hong Kong University of Science and Technology, Hong Kong, Hong Kong"],"email":"mxj@cse.ust.hk","is_corresponding":false,"name":"Xiaojuan Ma"},{"affiliations":["Southeast University, Nanjing, China","Southeast University, Nanjing, China"],"email":"cshiag@connect.ust.hk","is_corresponding":false,"name":"Chuhan Shi"},{"affiliations":["ShanghaiTech University, Shanghai, China","ShanghaiTech University, Shanghai, China"],"email":"liquan@shanghaitech.edu.cn","is_corresponding":false,"name":"Quan Li"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1089","image_caption":"System overview: Phase I includes (A) Cohort View for understanding drug event and disease progression relationships, (B) Patient Projection View to explore specific patient cohort characteristics, and (C) Medical Event View for detailed visualization of patient medical events. 
Phase II comprises (D) Modeling View for iterative AI model development and performance evaluation, and (E) Logs View for maintaining iteration records of models and associated data.","keywords":["Role Transfer, Hormone-related Medical Records, Visual Analytics, Machine Learning"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1089/v-short-1089_Preview.mp4?token=SF79zbkXm7K6OO_TFS20wJPZP4RNJ9eNFhQfShoogXk&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1089/v-short-1089_Preview.srt?token=9Nik6uy2GwUw6bX8pcyInh0NEgvJlDcJfnZ78Jha6Mw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: AI and LLM","session_uid":"v-short","session_youtube_ff_id":"_T6AUyLBmY4","session_youtube_ff_link":"https://youtu.be/_T6AUyLBmY4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/L9rtmx-1eyM&t=0h19m42s","sessions":["Short Papers: AI and LLM"],"time_stamp":"2024-10-17T18:03:00Z","title":"A Two-Phase Visualization System for Continuous Human-AI Collaboration in Sequelae Analysis and Modeling","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1177","abstract":"The proliferation of misleading visualizations online, particularly during critical events like public health crises and elections, poses a significant risk of misinformation. This work investigates the capability of GPT-4 models (4V, 4o, and 4o mini) to detect misleading visualizations. Utilizing a dataset of tweet-visualization pairs with various visual misleaders, we tested these models under four experimental conditions with different levels of guidance. Our results demonstrate that GPT-4 models can detect misleading visualizations with moderate accuracy without prior training (naive zero-shot) and that performance considerably improves by providing the model with the definitions of misleaders (guided zero-shot). Our results indicate that a single prompt engineering technique does not necessarily yield the best results for all types of misleaders. We found that guided few-shot was more effective for reasoning misleaders, while guided zero-shot performed better for design misleaders. 
This study underscores the feasibility of using large vision-language models to combat misinformation and emphasizes the importance of optimizing prompt engineering to enhance detection accuracy.","accessible_pdf":true,"authors":[{"affiliations":["University of Massachusetts Amherst, Amherst, United States"],"email":"jhalexander@umass.edu","is_corresponding":false,"name":"Jason Huang Alexander"},{"affiliations":["University of Masssachusetts Amherst, Amherst, United States"],"email":"phnanda@umass.edu","is_corresponding":false,"name":"Priyal H Nanda"},{"affiliations":["Northeastern University, Boston, United States"],"email":"yangkc@iu.edu","is_corresponding":false,"name":"Kai-Cheng Yang"},{"affiliations":["University of Massachusetts Amherst, Amherst, United States"],"email":"asarv@cs.umass.edu","is_corresponding":false,"name":"Ali Sarvghad"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1177","image_caption":"We evaluated the accuracy of three OpenAI GPT-4 models in detecting misleading visualizations. Our findings suggest that this approach could serve as a valuable complementary method for addressing misleading visualizations.","keywords":["Misleading visualizations, GPT-4, large vision language model, misinformation"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1177/v-short-1177_Preview.mp4?token=uEnZBJap4zJdsEMot4L6tPC2sUvFO2MLDPr5nGJ0LTc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1177/v-short-1177_Preview.srt?token=eFl3tdQd8M-ElVVmSy-kgMYVuPmMMSimsp_USszqngs&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: AI and LLM","session_uid":"v-short","session_youtube_ff_id":"dUwRzvfPmaI","session_youtube_ff_link":"https://youtu.be/dUwRzvfPmaI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/L9rtmx-1eyM&t=0h29m21s","sessions":["Short Papers: AI and LLM"],"time_stamp":"2024-10-17T18:12:00Z","title":"Can GPT-4 Models Detect Misleading Visualizations?","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1186","abstract":"Data visualizations help extract insights from datasets, but reaching these insights requires decomposing high level goals into low-level analytic tasks that can be complex due to varying degrees of data literacy and visualization experience. Recent advancements in large language models (LLMs) have shown promise for lowering barriers for users to achieve tasks such as writing code and may likewise facilitate visualization insight. Scalable Vector Graphics (SVG), a text-based image format common in data visualizations, matches well with the text sequence processing of transformer-based LLMs. In this paper, we explore the capability of LLMs to perform 10 low-level visual analytic tasks defined by Amar, Eagan, and Stasko directly on SVG-based visualizations. Using zero-shot prompts, we instruct the models to provide responses or modify the SVG code based on given visualizations. 
Our findings demonstrate that LLMs can effectively modify existing SVG visualizations for some tasks like Cluster but perform poorly on tasks requiring mathematical operations like Compute Derived Value. We also discovered that LLM performance can vary based on factors such as the number of data points, the presence of value labels, and the chart type. Our findings contribute to gauging the general capabilities of LLMs and highlight the need for further exploration and development to fully harness their potential in supporting visual analytic tasks.","accessible_pdf":true,"authors":[{"affiliations":["Brown University, Providence, United States"],"email":"leooooxzz@gmail.com","is_corresponding":true,"name":"Zhongzheng Xu"},{"affiliations":["Emory University, Atlanta, United States"],"email":"emily.wall@emory.edu","is_corresponding":false,"name":"Emily Wall"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1186","image_caption":"The image is an illustration of the study design of the paper Exploring the Capability of LLMs in Performing Low-Level Visual Analytic Tasks on SVG Data Visualizations. This figure consists of three main components: Plot Type, Plot Difficulty, and Low-level Visual Analytics Tasks. Plot Types include Scatter, Line, and Bar charts, all in SVG format. Plot Difficulty is divided into Small Labeled, Small Unlabeled, Medium Labeled, and Medium Unlabeled, with 20 sets of each type. Low-level Visual Analytics Tasks include Retrieve Value, Filter, Compute Derived Value, Find Extremum, Sort, Determine Range, Characterize Distribution, Find Anomalies, Cluster, and Correlate. ","keywords":["Data Visualization, Large Language Models (LLM), Visual Analytics Tasks, Support Vector Graphics (SVG)"],"open_access_supplemental_link":"https://github.com/lebretou/SVG_taxonomy","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/pdf/2404.19097","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1186/v-short-1186_Preview.mp4?token=uWPsx3kUTnpqLdnhBYyj2x3p9kqHL8X3VG8HHeIsDvY&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: AI and LLM","session_uid":"v-short","session_youtube_ff_id":"oEOBdI3DxCk","session_youtube_ff_link":"https://youtu.be/oEOBdI3DxCk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/L9rtmx-1eyM&t=1h6m29s","sessions":["Short Papers: AI and LLM"],"time_stamp":"2024-10-17T18:48:00Z","title":"Exploring the Capability of LLMs in Performing Low-Level Visual Analytic Tasks on SVG Data Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1193","abstract":"We present LinkQ, a system that leverages a large language model (LLM) to facilitate knowledge graph (KG) query construction through natural language question-answering. Traditional approaches often require detailed knowledge of a graph querying language, limiting the ability for users - even experts - to acquire valuable insights from KGs. 
LinkQ simplifies this process by implementing a multistep protocol in which the LLM interprets a user's question, then systematically converts it into a well-formed query. LinkQ helps users iteratively refine any open-ended questions into precise ones, supporting both targeted and exploratory analysis. Further, LinkQ guards against the LLM hallucinating outputs by ensuring users' questions are only ever answered from ground truth KG data. We demonstrate the efficacy of LinkQ through a qualitative study with five KG practitioners. Our results indicate that practitioners find LinkQ effective for KG question-answering, and desire future LLM-assisted exploratory data analysis systems.","accessible_pdf":true,"authors":[{"affiliations":["MIT Lincoln Laboratory, Lexington, United States"],"email":"harry.li@ll.mit.edu","is_corresponding":true,"name":"Harry Li"},{"affiliations":["Tufts University, Medford, United States"],"email":"gabriel.appleby@gmail.com","is_corresponding":false,"name":"Gabriel Appleby"},{"affiliations":["MIT Lincoln Laboratory, Lexington, United States"],"email":"ashley.suh@ll.mit.edu","is_corresponding":false,"name":"Ashley Suh"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1193","image_caption":"Exemplar workflow for LinkQ, a system leveraging an LLM for refining natural language questions into knowledge graph queries. The (A) Chat Panel lets users communicate with the LLM to ask specific or open-ended questions. The Query Preview Panel consists of three components: the (B1) Query Editor, which supports interactive editing; the (B2) Entity-Relation Table, which provides mapped data IDs from the KG, helping to assess the correctness of the LLM's generated query; and the (B3) Query Graph, which visualizes the structure of the query to illustrate the underlying schema of the KG. Finally, the (C) Results Panel provides a cleaned, exportable table as well as an LLM-generated summary based on the query results. Importantly, LinkQ ensures all data retrieved and summarized by the LLM comes from ground truth in the KG. 
","keywords":["Knowledge graphs, large language models, query construction, question-answering, natural language interfaces."],"open_access_supplemental_link":"https://github.com/mit-ll/linkq","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2406.06621","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1193/v-short-1193_Preview.mp4?token=QWEmKWvXRK6Q35t4UEn04PKOw9ngniepfJ0NRx1_Fxk&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: AI and LLM","session_uid":"v-short","session_youtube_ff_id":"QfXSQxEjhuM","session_youtube_ff_link":"https://youtu.be/QfXSQxEjhuM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/L9rtmx-1eyM&t=0h48m51s","sessions":["Short Papers: AI and LLM"],"time_stamp":"2024-10-17T18:30:00Z","title":"LinkQ: An LLM-Assisted Visual Interface for Knowledge Graph Question-Answering","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1224","abstract":"Diffusion-based generative models\u2019 impressive ability to create convincing images has garnered global attention. However, their complex structures and operations often pose challenges for non-experts to grasp. We present Diffusion Explainer, the first interactive visualization tool that explains how Stable Diffusion transforms text prompts into images. Diffusion Explainer tightly integrates a visual overview of Stable Diffusion\u2019s complex structure with explanations of the underlying operations. By comparing image generation of prompt variants, users can discover the impact of keyword changes on image generation. A 56-participant user study demonstrates that Diffusion Explainer offers substantial learning benefits to non-experts. Our tool has been used by over 10,300 users from 124 countries at https://poloclub.github.io/diffusion-explainer/.","accessible_pdf":true,"authors":[{"affiliations":["Georgia Tech, Atlanta, United States"],"email":"seongmin@gatech.edu","is_corresponding":true,"name":"Seongmin Lee"},{"affiliations":["GA Tech, Atlanta, United States","IBM Research AI, Cambridge, United States"],"email":"benjamin.hoover@ibm.com","is_corresponding":false,"name":"Benjamin Hoover"},{"affiliations":["IBM Research AI, Cambridge, United States"],"email":"hendrik@strobelt.com","is_corresponding":false,"name":"Hendrik Strobelt"},{"affiliations":["Georgia Tech, Atlanta, United States"],"email":"jayw@gatech.edu","is_corresponding":false,"name":"Zijie J. 
Wang"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"speng65@gatech.edu","is_corresponding":false,"name":"ShengYun Peng"},{"affiliations":["Georgia Institute of Technology , Atlanta , United States"],"email":"apwright@gatech.edu","is_corresponding":false,"name":"Austin P Wright"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"kevin.li@gatech.edu","is_corresponding":false,"name":"Kevin Li"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"haekyu@gatech.edu","is_corresponding":false,"name":"Haekyu Park"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"alexanderyang@gatech.edu","is_corresponding":false,"name":"Haoyang Yang"},{"affiliations":["Georgia Tech, Atlanta, United States"],"email":"polo@gatech.edu","is_corresponding":false,"name":"Duen Horng (Polo) Chau"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1224","image_caption":"With Diffusion Explainer, users can visually examine how text prompt (e.g., \u201ca cute and adorable bunny... pixar character\u201d) is encoded by the Text Representation Generator into vectors to guide the Image Representation Refiner to iteratively refine the vector representation of the image being generated. The Timestep Controller enables users to review the incremental improvements in image quality and adherence to the prompt over timesteps. Diffusion Explainer tightly integrates a visual overview of Stable Diffusion\u2019s complex components with detailed explanations of their underlying operations, enabling users to fluidly transition between multiple levels of abstraction through animations and interactive elements.","keywords":["Machine Learning, Statistics, Modelling, and Simulation Applications; Software Prototype"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/pdf/2305.03509","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1224/v-short-1224_Preview.mp4?token=Gsc_ECc1b4rCDT_VV1OB8FzF3h6FXOzMo2Y0u5CfSmc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1224/v-short-1224_Preview.srt?token=2uEmmGpRHkM7GDs0bqg-aGpRfDyJcUhPq8zTQtGvo44&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: AI and LLM","session_uid":"v-short","session_youtube_ff_id":"1En1p1RBKr4","session_youtube_ff_link":"https://youtu.be/1En1p1RBKr4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/L9rtmx-1eyM&t=0h10m0s","sessions":["Short Papers: AI and LLM"],"time_stamp":"2024-10-17T17:54:00Z","title":"Diffusion Explainer: Visual Explanation for Text-to-image Stable Diffusion","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1057","abstract":"Real-world datasets often consist of quantitative and categorical variables. The analyst needs to focus on either kind separately or both jointly. We proposed a visualization technique tackling these challenges that supports visual cluster and set analysis. 
In this paper, we investigate how its visualization parameters affect the accuracy and speed of cluster and set analysis tasks in a controlled experiment. Our findings show that, with the proper settings, our visualization can support both task types well. However, we did not find settings suitable for the joint task, which provides opportunities for future research.","accessible_pdf":false,"authors":[{"affiliations":["TU Wien, Vienna, Austria"],"email":"nikolaus.piccolotto@tuwien.ac.at","is_corresponding":false,"name":"Nikolaus Piccolotto"},{"affiliations":["TU Wien, Vienna, Austria"],"email":"mwallinger@ac.tuwien.ac.at","is_corresponding":true,"name":"Markus Wallinger"},{"affiliations":["Institute of Visual Computing and Human-Centered Technology, Vienna, Austria"],"email":"miksch@ifs.tuwien.ac.at","is_corresponding":false,"name":"Silvia Miksch"},{"affiliations":["TU Wien, Vienna, Austria"],"email":"markus.boegl@tuwien.ac.at","is_corresponding":false,"name":"Markus B\u00f6gl"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1057","image_caption":"Our results show that layouts focused on multidimensional similarities supported a multidimensional cluster analysis task, layouts focused on set similarities supported set relation tasks, and neither layout supported the joint task well. ","keywords":["Visual cluster analysis, set visualization."],"open_access_supplemental_link":"https://osf.io/8gxzw/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://osf.io/zx9s6","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1057/v-short-1057_Preview.mp4?token=qKv-_jwVq8nvYMJYoaCoOCynyV0WJEeZi79eGjVM1Fw&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1057/v-short-1057_Preview.srt?token=f0HmYwM8JFtI6Mcg0_EBs7-Ig5iE6VS68TdE2a7LZvc&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short4","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Graph, Hierarchy and Multidimensional","session_uid":"v-short","session_youtube_ff_id":"ah4dt96Yo1M","session_youtube_ff_link":"https://youtu.be/ah4dt96Yo1M","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/jlbzvyg9IZc&t=0h0m54s","sessions":["Short Papers: Graph, Hierarchy and Multidimensional"],"time_stamp":"2024-10-16T12:30:00Z","title":"On Combined Visual Cluster and Set Analysis","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1065","abstract":"Although many dimensionality reduction (DR) techniques employ stochastic methods for computational efficiency, such as negative sampling or stochastic gradient descent, their impact on the projection has been underexplored. In this work, we investigate how such stochasticity affects the stability of projections and present a novel DR technique, GhostUMAP, to measure the pointwise instability of projections. Our idea is to introduce clones of data points, \u201cghosts\u201d, into UMAP\u2019s layout optimization process. Ghosts are designed to be completely passive: they do not affect any others but are influenced by attractive and repulsive forces from the original data points. 
After a single optimization run, GhostUMAP can capture the projection instability of data points by measuring the variance with the projected positions of their ghosts. We also present a successive halving technique to reduce the computation of GhostUMAP. Our results suggest that GhostUMAP can reveal unstable data points with a reasonable computational overhead.","accessible_pdf":true,"authors":[{"affiliations":["Sungkyunkwan University, Suwon, Korea, Republic of"],"email":"mw.jung@skku.edu","is_corresponding":true,"name":"Myeongwon Jung"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"takanori.fujiwara@liu.se","is_corresponding":false,"name":"Takanori Fujiwara"},{"affiliations":["Sungkyunkwan University, Suwon, Korea, Republic of"],"email":"jmjo@skku.edu","is_corresponding":false,"name":"Jaemin Jo"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1065","image_caption":"Each projection is part of a GhostUMAP projection generated for the CIFAR-10 dataset. Case (A) depicts the trajectories of a stable point where the original projection (blue cross) and its ghosts (blue triangles) are projected to a consistent location. In contrast, Case (B) shows the trajectories of an unstable point. The trajectories diverge, implying instability in the final projection of the point (orange cross).","keywords":["Dimensionality Reduction"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1065/v-short-1065_Preview.mp4?token=S4Ft136d_boh8u-gXn-wAdiz9Aw_YFeVRCGqkHB9JGk&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short4","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Graph, Hierarchy and Multidimensional","session_uid":"v-short","session_youtube_ff_id":"99IVMIqYnfA","session_youtube_ff_link":"https://youtu.be/99IVMIqYnfA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/jlbzvyg9IZc&t=0h55m43s","sessions":["Short Papers: Graph, Hierarchy and Multidimensional"],"time_stamp":"2024-10-16T13:24:00Z","title":"GhostUMAP: Measuring Pointwise Instability in Dimensionality Reduction","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1096","abstract":"Coordinated multiple views (CMV) in a visual analytics system can help users explore multiple data representations simultaneously with linked interactions. However, the implementation of coordinated multiple views can be challenging. Without standard software libraries, visualization designers need to re-implement CMV during the development of each system. We introduce use-coordination, a grammar and software library that supports the efficient implementation of CMV. The grammar defines a JSON-based representation for an abstract coordination model from the information visualization literature. We contribute an optional extension to the model and grammar that allows for hierarchical coordination. 
Through three use cases, we show that use-coordination enables implementation of CMV in systems containing not only basic statistical charts but also more complex visualizations such as medical imaging volumes. We describe six software extensions, including a graphical editor for manipulation of coordination, which showcase the potential to build upon our coordination-focused declarative approach. The software is open-source and available at https://use-coordination.dev.","accessible_pdf":true,"authors":[{"affiliations":["Harvard Medical School, Boston, United States"],"email":"mark_keller@hms.harvard.edu","is_corresponding":true,"name":"Mark S Keller"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"trevor_manz@g.harvard.edu","is_corresponding":false,"name":"Trevor Manz"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"nils@hms.harvard.edu","is_corresponding":false,"name":"Nils Gehlenborg"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1096","image_caption":"Our use-coordination approach streamlines the implementation of coordinated multiple views (CMV) by leveraging a declarative grammar and embracing modern reactive user interface development frameworks. Use-coordination is flexible because it is decoupled from any particular data type or visualization approach.","keywords":["Visualization toolkits, visual analytics, domain specific languages"],"open_access_supplemental_link":"https://doi.org/10.17605/OSF.IO/SEJN5","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://doi.org/10.31219/osf.io/vhs7m","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1096/v-short-1096_Preview.mp4?token=BkrEnuXnDc6Qq3r-mvF-Xx8BXFUnzobudxpjY1msKvc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1096/v-short-1096_Preview.srt?token=-1IIZRjdDEGawXQSUfwnaBL9Zb7s1YT8EiH2LF6Ov7Q&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short4","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Graph, Hierarchy and Multidimensional","session_uid":"v-short","session_youtube_ff_id":"yUeqo0sWUgU","session_youtube_ff_link":"https://youtu.be/yUeqo0sWUgU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/jlbzvyg9IZc&t=1h4m28s","sessions":["Short Papers: Graph, Hierarchy and Multidimensional"],"time_stamp":"2024-10-16T13:33:00Z","title":"Use-Coordination: Model, Grammar, and Library for Implementation of Coordinated Multiple Views","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1121","abstract":"Many real-world networks contain structurally-equivalent nodes. These are defined as vertices that share the same set of neighboring nodes, making them interchangeable with a traditional graph layout approach. However, many real-world graphs also have properties associated with nodes, adding additional meaning to them. We present an approach for swapping locations of structurally-equivalent nodes in graph layout so that those with more similar properties have closer proximity to each other. 
This improves the usefulness of the visualization from an attribute perspective without negatively impacting the visualization from a structural perspective. We include an algorithm for finding these sets of nodes in linear time, as well as methodologies for ordering nodes based on their attribute similarity, which works for scalar, ordinal, multidimensional, and categorical data.","accessible_pdf":false,"authors":[{"affiliations":["Pacific Northwest National Lab, Richland, United States"],"email":"patrick.mackey@pnnl.gov","is_corresponding":true,"name":"Patrick Mackey"},{"affiliations":["University of Arizona, Tucson, United States","Pacific Northwest National Laboratory, Richland, United States"],"email":"jacobmiller1@arizona.edu","is_corresponding":false,"name":"Jacob Miller"},{"affiliations":["Pacific Northwest National Laboratory, Richland, United States"],"email":"liz.f@pnnl.gov","is_corresponding":false,"name":"Liz Faultersack"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1121","image_caption":"An example of a property graph layout after having the structurally-equivalent nodes re-arranged based on their attribute similarity.","keywords":["graph drawing, network visualization, property graphs, attributed networks"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1121/v-short-1121_Preview.mp4?token=OUM0v9qW79f7TfsRaOeb77D6h02SwUbmQkWN-KwS6DE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1121/v-short-1121_Preview.srt?token=zTMNeUIQMHuumaHQigaiATn_3NTWlCLrcgsn3B4gIss&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short4","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Graph, Hierarchy and Multidimensional","session_uid":"v-short","session_youtube_ff_id":"JrF56KcFXuU","session_youtube_ff_link":"https://youtu.be/JrF56KcFXuU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/jlbzvyg9IZc&t=0h19m12s","sessions":["Short Papers: Graph, Hierarchy and Multidimensional"],"time_stamp":"2024-10-16T12:48:00Z","title":"Improving Property Graph Layouts by Leveraging Attribute Similarity for Structurally Equivalent Nodes","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1135","abstract":"Humans struggle to perceive and interpret high-dimensional data. Therefore, high-dimensional data are often projected into two dimensions for visualization. Many applications benefit from complex nonlinear dimensionality reduction techniques, but the effects of individual high-dimensional features are hard to explain in the two-dimensional space. Most visualization solutions use multiple two-dimensional plots, each showing the effect of one high-dimensional feature in two dimensions; this approach creates a need for a visual inspection of k plots for a k-dimensional input space. Our solution, Feature Clock, provides a novel approach that eliminates the need to inspect these k plots to grasp the influence of original features on the data structure depicted in two dimensions. 
Feature Clock enhances the explainability and compactness of visualizations of embedded data and is available in an open-source Python library.","accessible_pdf":true,"authors":[{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"ovcharenko.folga@gmail.com","is_corresponding":true,"name":"Olga Ovcharenko"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"rita.sevastjanova@inf.ethz.ch","is_corresponding":false,"name":"Rita Sevastjanova"},{"affiliations":["ETH Zurich, Z\u00fcrich, Switzerland"],"email":"valentina.boeva@inf.ethz.ch","is_corresponding":false,"name":"Valentina Boeva"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1135","image_caption":"Feature Clock uses high-dimensional data, and shows the largest contribution of each high-dimensional feature in two-dimensional space.","keywords":["High-dimensional data, nonlinear dimensionality reduction, feature importance, visualization"],"open_access_supplemental_link":"https://github.com/OlgaOvcharenko/feature_clock_visualization","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1135/v-short-1135_Preview.mp4?token=Uyqr0ftF16lo7feojMYgYvpEeIgXN8ryZJSrHS7eDK4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1135/v-short-1135_Preview.srt?token=kLhkJ66oQpLM2hA0d3adRQIDUWOKTCFpj0d4B-1jquU&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short4","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Graph, Hierarchy and Multidimensional","session_uid":"v-short","session_youtube_ff_id":"jKrGV7L6pFY","session_youtube_ff_link":"https://youtu.be/jKrGV7L6pFY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/jlbzvyg9IZc&t=0h37m27s","sessions":["Short Papers: Graph, Hierarchy and Multidimensional"],"time_stamp":"2024-10-16T13:06:00Z","title":"Feature Clock: High-Dimensional Effects in Two-Dimensional Plots","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1156","abstract":"Compound graphs are networks in which vertices can be grouped into larger subsets, with these subsets capable of further grouping, resulting in a nesting that can be many levels deep. In several applications, including biological workflows, chemical equations, and computational data flow analysis, these graphs often exhibit a tree-like nesting structure, where sibling clusters are disjoint. Common compound graph layouts prioritize the lowest level of the grouping, down to the individual ungrouped vertices, which can make the higher level grouped structures more difficult to discern, especially in deeply nested networks. Leveraging the additional structure of the tree-like nesting, we contribute an overview+detail layout for this class of compound graphs that preserves the saliency of the higher level network structure when groups are expanded to show internal nested structure. Our layout draws inner structures adjacent to their parents, using a modified tree layout to place substructures. 
We describe our algorithm and then present case studies demonstrating the layout's utility to a domain expert working on data flow analysis. Finally, we discuss network parameters and analysis situations in which our layout is well suited.","accessible_pdf":false,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"hatch.on27@gmail.com","is_corresponding":true,"name":"Chang Han"},{"affiliations":["University of Arizona, Tucson, United States"],"email":"lieffers@arizona.edu","is_corresponding":false,"name":"Justin Lieffers"},{"affiliations":["University of Arizona, Tucson, United States"],"email":"claytonm@arizona.edu","is_corresponding":false,"name":"Clayton Morrison"},{"affiliations":["The University of Utah, Salt Lake City, United States"],"email":"kisaacs@sci.utah.edu","is_corresponding":false,"name":"Katherine E. Isaacs"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1156","image_caption":"An illustration of our proposed variant of Reingold-Tilford algorithm. The input data is shown in both our layout and a tree view without inner structure. As we follow the RT bottom-up placement, we place group parents with respect to expanded children based on the position of their corresponding internal node. We then make separation passes in both directions of tree expansion.","keywords":["compound graphs, network layout, graph drawing, network visualization, graph visualization"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.04045","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1156/v-short-1156_Preview.mp4?token=i-sxI1vy8rJyeDsR7LjNQfCNMoewFjGySDcH_2WTyaI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1156/v-short-1156_Preview.srt?token=DCH2aWCrsMk4wZ-3G0MiS_Vw0M_6KHoo974zKrdhyT8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short4","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Graph, Hierarchy and Multidimensional","session_uid":"v-short","session_youtube_ff_id":"PlQT_Hpz0zg","session_youtube_ff_link":"https://youtu.be/PlQT_Hpz0zg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/jlbzvyg9IZc&t=0h9m56s","sessions":["Short Papers: Graph, Hierarchy and Multidimensional"],"time_stamp":"2024-10-16T12:39:00Z","title":"An Overview + Detail Layout for Visualizing Compound Graphs","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1173","abstract":"Visualizing citation relations with network structures is widely used, but the visual complexity can make it challenging for individual researchers to navigate through them. We collected data from 18 researchers using an interface that we designed using network simplification methods and analyzed how users browsed and identified important papers. Our analysis reveals six major patterns used for identifying papers of interest, which can be categorized into three key components: Fields, Bridges, and Foundations, each viewed from two distinct perspectives: layout-oriented and connection-oriented. 
The connection-oriented approach was found to be more reliable for selecting relevant papers, but the layout-oriented method was adopted more often, even though it led to unexpected results and user frustration. Our findings emphasize the importance of integrating these components and the necessity to balance visual layouts with meaningful connections to enhance the effectiveness of citation networks in academic browsing systems.","accessible_pdf":true,"authors":[{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"krchoe@hcil.snu.ac.kr","is_corresponding":true,"name":"Kiroong Choe"},{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"gracekim027@snu.ac.kr","is_corresponding":false,"name":"Eunhye Kim"},{"affiliations":["Dept. of Electrical and Computer Engineering, SNU, Seoul, Korea, Republic of"],"email":"paulmoguri@snu.ac.kr","is_corresponding":false,"name":"Sangwon Park"},{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"jseo@snu.ac.kr","is_corresponding":false,"name":"Jinwook Seo"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1173","image_caption":"We identified six patterns that researchers utilize to browse citation networks and discover papers of interest. Component-wise, these patterns can be classified to: Field (i.e., related papers on a single research topic), Bridge (i.e., logical connections between papers or topics), and Foundation (i.e., stages in the broad development of research). For each component, there were two different perspectives: layout-oriented or connection-oriented. Our analysis suggests that researchers generally preferred the layout-oriented perspective for its intuitiveness, but papers identified through the connection-oriented perspective were typically more useful.","keywords":["Literature search, network visualization"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2405.07267","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1173/v-short-1173_Preview.mp4?token=yxKADsYdJzT9lfoXD_QC0ilL3z2091KwudYj09XOTmw&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1173/v-short-1173_Preview.srt?token=f_Xk6ld97NrVlsDS6hylB1ssCIBKRRFAsDWK07vQ5Nw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short4","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Graph, Hierarchy and Multidimensional","session_uid":"v-short","session_youtube_ff_id":"AlqlP1Rto84","session_youtube_ff_link":"https://youtu.be/AlqlP1Rto84","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/jlbzvyg9IZc&t=0h28m32s","sessions":["Short Papers: Graph, Hierarchy and Multidimensional"],"time_stamp":"2024-10-16T12:57:00Z","title":"Fields, Bridges, and Foundations: How Researchers Browse Citation Network Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1235","abstract":"A high number of samples often leads to occlusion in scatterplots, which hinders data perception and analysis. 
De-cluttering approaches based on spatial transformation reduce visual clutter by remapping samples using the entire available scatterplot domain. Such regularized scatterplots may still be used for data analysis tasks, if the spatial transformation is smooth and preserves the original neighborhood relations of samples. Recently, Rave et al. proposed an efficient regularization method based on integral images. We propose a generalization of their regularization scheme using sector-based transformations with the aim of increasing sample uniformity of the resulting scatterplot. We document the improvement of our approach using various uniformity measures.","accessible_pdf":false,"authors":[{"affiliations":["University of M\u00fcnster, M\u00fcnster, Germany"],"email":"hennes.rave@uni-muenster.de","is_corresponding":true,"name":"Hennes Rave"},{"affiliations":["University of M\u00fcnster, M\u00fcnster, Germany"],"email":"molchano@uni-muenster.de","is_corresponding":false,"name":"Vladimir Molchanov"},{"affiliations":["University of M\u00fcnster, M\u00fcnster, Germany"],"email":"linsen@uni-muenster.de","is_corresponding":false,"name":"Lars Linsen"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1235","image_caption":"Sector-based transformation of a UMAP embedding of the Iris dataset. 16 sectors and anchor points for a selected sample are shown for the original scatterplot. The black anchor point at the bottom belongs to the highlighted sector at the top. Samples are moved toward a sector's anchor point based on the point density inside that sector. The resulting displacement vector is shown in blue.","keywords":["Scatterplot de-cluttering, spatial transformation."],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1235/v-short-1235_Preview.mp4?token=cvnyCz3mlT9aNamwFo0HrmecGd8sSWZosP2uo4-O4go&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short4","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Graph, Hierarchy and Multidimensional","session_uid":"v-short","session_youtube_ff_id":"CF_fK_gXpZU","session_youtube_ff_link":"https://youtu.be/CF_fK_gXpZU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/jlbzvyg9IZc&t=0h47m15s","sessions":["Short Papers: Graph, Hierarchy and Multidimensional"],"time_stamp":"2024-10-16T13:15:00Z","title":"Uniform Sample Distribution in Scatterplots via Sector-based Transformation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1049","abstract":"This comparative study evaluates various neural surface reconstruction methods, particularly focusing on their implications for scientific visualization through reconstructing 3D surfaces via multi-view rendering images. We categorize ten methods into neural radiance fields and neural implicit surfaces, uncovering the benefits of leveraging distance functions (i.e., SDFs and UDFs) to enhance the accuracy and smoothness of the reconstructed surfaces. 
Our findings highlight the efficiency and quality of NeuS2 for reconstructing closed surfaces and identify NeUDF as a promising candidate for reconstructing open surfaces despite some limitations. By sharing our benchmark dataset, we invite researchers to test the performance of their methods, contributing to the advancement of surface reconstruction solutions for scientific visualization.","accessible_pdf":true,"authors":[{"affiliations":["University of Notre Dame, Notre Dame, United States"],"email":"syao2@nd.edu","is_corresponding":true,"name":"Siyuan Yao"},{"affiliations":["Wuhan University, Wuhan, China"],"email":"song.wx@whu.edu.cn","is_corresponding":false,"name":"Weixi Song"},{"affiliations":["University of Notre Dame, Notre Dame, United States"],"email":"chaoli.wang@nd.edu","is_corresponding":false,"name":"Chaoli Wang"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1049","image_caption":"We selected 10 representative surface reconstruction methods and created 9 datasets for evaluation. Each dataset comprises 42 images for training and 181 images for testing. After training the models, we used them to generate neural surface rendering images and reconstruct surface polygon meshes. The synthesized results were evaluated using peak signal-to-noise ratio (PSNR), learned perceptual image patch similarity (LPIPS) against ground truth images, and chamfer distance against the ground truth surface mesh. We also comprehensively analyzed the results, including model design and performance.","keywords":["Machine Learning Techniques, Datasets"],"open_access_supplemental_link":"https://www.kaggle.com/datasets/syaond/scivis-surface-dataset/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2407.20868","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1049/v-short-1049_Preview.mp4?token=uoTYztiyhPmlmrydoqmYzohPfMPzTYBsVUdGvLxD_WQ&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1049/v-short-1049_Preview.srt?token=LrchUQH0UK--wYWJWgdYgRt-EeJDWRM6Hl2rKh5bRrs&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Scientific and Immersive Visualization","session_uid":"v-short","session_youtube_ff_id":"gC0jSUB5PvU","session_youtube_ff_link":"https://youtu.be/gC0jSUB5PvU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/__o_dwELzN8&t=0h28m20s","sessions":["Short Papers: Scientific and Immersive Visualization"],"time_stamp":"2024-10-16T16:27:00Z","title":"A Comparative Study of Neural Surface Reconstruction for Scientific Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1054","abstract":"Direct volume rendering using ray-casting is widely used in practice. By using GPUs and applying acceleration techniques as empty space skipping, high frame rates are possible on modern hardware.This enables performance-critical use-cases such as virtual reality volume rendering. 
The currently fastest known technique uses volumetric distance maps to skip empty sections of the volume during ray-casting but requires the distance map to be updated per transfer function change. In this paper, we demonstrate a technique for subdividing the volume intensity range into partitions and deriving what we call partitioned distance maps. These can be used to accelerate the distance map computation for a newly changed transfer function by a factor up to 30. This allows the currently fastest known empty space skipping approach to be used while maintaining high frame rates even when the transfer function is changed frequently.","accessible_pdf":true,"authors":[{"affiliations":["University of Applied Sciences Wiener Neustadt, Wiener Neustadt, Austria"],"email":"michael.rauter@fhwn.ac.at","is_corresponding":true,"name":"Michael Rauter"},{"affiliations":["Medical University of Vienna, Vienna, Austria"],"email":"lukas.a.zimmermann@meduniwien.ac.at","is_corresponding":false,"name":"Lukas Zimmermann"},{"affiliations":["University of Applied Sciences Wiener Neustadt, Wiener Neustadt, Austria"],"email":"markus.zeilinger@fhwn.ac.at","is_corresponding":false,"name":"Markus Zeilinger"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1054","image_caption":"Direct volume renderings of the manix dataset applying distinct transfer functions. Distance map based empty space skipping can be used to accelerate rendering. Different transfer functions result in different distance maps as indicated in the image. Therefore, it is required to recompute the distance map on a transfer function update. In the paper, we demonstrate how to compute the distance map faster than before by computing what we call partitioned distance maps as a preprocessing step, and combining them into the final distance map at runtime.","keywords":["Computing methodologies\u2014Computer graphics\u2014Rendering, Theory of computation\u2014Design and analysis of algorithms\u2014Data structures design and analysis."],"open_access_supplemental_link":"https://osf.io/n5k6z","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2407.21552","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1054/v-short-1054_Preview.mp4?token=cb8cLTNiWrnvMHsDzo4kJfGqvCTgwuvoHALUniFGIR8&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Scientific and Immersive Visualization","session_uid":"v-short","session_youtube_ff_id":"De6SwX2KSV4","session_youtube_ff_link":"https://youtu.be/De6SwX2KSV4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/__o_dwELzN8&t=0h0m20s","sessions":["Short Papers: Scientific and Immersive Visualization"],"time_stamp":"2024-10-16T16:00:00Z","title":"Accelerating Transfer Function Update for Distance Map based Volume Rendering","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1119","abstract":"Analyzing uncertainty in spatial data is a vital task in many domains, as for example with climate and weather simulation ensembles. 
Although many methods support the analysis of uncertain 2D data, such as uncertain isocontours or overlaying of statistical information on plots of the actual data, it is still a challenge to get a more detailed overview of 2D data together with its statistical properties. We present cumulative height fields, a visualization method for 2D scalar field ensembles using the marginal empirical distribution function and show preliminary results using volume rendering and slicing for the Max Planck Institute Grand Ensemble.","accessible_pdf":false,"authors":[{"affiliations":["Institute of Computer Science, Leipzig University, Leipzig, Germany"],"email":"daetz@informatik.uni-leipzig.de","is_corresponding":true,"name":"Tomas Daetz"},{"affiliations":["German Climate Computing Center (DKRZ), Hamburg, Germany"],"email":"boettinger@dkrz.de","is_corresponding":false,"name":"Michael B\u00f6ttinger"},{"affiliations":["Leipzig University, Leipzig, Germany"],"email":"scheuermann@informatik.uni-leipzig.de","is_corresponding":false,"name":"Gerik Scheuermann"},{"affiliations":["Leipzig University, Leipzig, Germany"],"email":"heine@informatik.uni-leipzig.de","is_corresponding":false,"name":"Christian Heine"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1119","image_caption":"Precipitation change (%) in 2080-2099 relative to 1986-2005 based on 100 simulation runs of the RCP8.5 scenario within MPI-GE. (a) shows a direct volume rendering of the cumulative height field using a 2D transfer function, mapping cumulative probabilities to opacity and precipitation change to color (blue: increase, red: decrease), and an isosurface of the median. (d) shows an orthographic view from the top. The intersection of the black lines show the point of interest (0\u00b0, 170\u00b0W). (b) and (c) show the cumulative function graphs along each component of the point of interest. The purple lines depict the zero percent difference. 
","keywords":["Scalar field visualization, ensemble visualization, volume rendering, nonparametric statistics."],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1119/v-short-1119_Preview.mp4?token=it5hcr8mic5-t_R9wL8DHtwmG8pL9-YBm8UwxJG2i7A&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1119/v-short-1119_Preview.srt?token=pa9nm3tAGNmlCt9qIZjWGXj1IRf6yQ7cyItR1aCBEBg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Scientific and Immersive Visualization","session_uid":"v-short","session_youtube_ff_id":"muHSHH_zJK8","session_youtube_ff_link":"https://youtu.be/muHSHH_zJK8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/__o_dwELzN8&t=0h37m10s","sessions":["Short Papers: Scientific and Immersive Visualization"],"time_stamp":"2024-10-16T16:36:00Z","title":"Visualization of 2D Scalar Field Ensembles Using Volume Visualization of the Empirical Distribution Function","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1127","abstract":"In this paper, we analyze the Apple Vision Pro hardware and the visionOS software platform, assessing their capabilities for volume rendering of structured grids---a prevalent technique across various applications. The Apple Vision Pro supports multiple display modes, from classical augmented reality (AR) using video see-through technology to immersive virtual reality (VR) environments that exclusively render virtual objects. These modes utilize different APIs and exhibit distinct capabilities. Our focus is on direct volume rendering, selected for its implementation challenges due to the native graphics APIs being predominantly oriented towards surface shading. Volume rendering is particularly vital in fields where AR and VR visualizations offer substantial benefits, such as in medicine and manufacturing. Despite its initial high cost, we anticipate that the Vision Pro will become more accessible and affordable over time, following Apple's track record of market expansion. As these devices become more prevalent, understanding how to effectively program and utilize them becomes increasingly important, offering significant opportunities for innovation and practical applications in various sectors.","accessible_pdf":false,"authors":[{"affiliations":["University of Duisburg-Essen, Duisburg, Germany"],"email":"camilla.hrycak@uni-due.de","is_corresponding":true,"name":"Camilla Hrycak"},{"affiliations":["University of Duisburg-Essen, Duisburg, Germany"],"email":"david.lewakis@stud.uni-due.de","is_corresponding":false,"name":"David Lewakis"},{"affiliations":["University of Duisburg-Essen, Duisburg, Germany"],"email":"jens.krueger@uni-due.de","is_corresponding":false,"name":"Jens Harald Krueger"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1127","image_caption":"Screenshots of our testbed direct volume rendering application on the Apple Vision Pro. 
From Top: Slice-based volume rendering in a shared space with video see-through, Bottom: Rendering the dataset in a fully immersive space. Notice varying image quality across the figures due to active foveation.","keywords":["Apple Vision Pro, Volume Rendering, Virtual Reality, Augmented Reality"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://www.cgvis.de/publications/2024/hrycak_2024_vision1.pdf","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1127/v-short-1127_Preview.mp4?token=EF0pYVl1SFuGLoQVOYV5qGbXuSquoNn8ED0zBmwTrwY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1127/v-short-1127_Preview.srt?token=GGFYa8zsYzU02kMeu2J9cBwMjRY471VZhuaEsPlx76w&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Scientific and Immersive Visualization","session_uid":"v-short","session_youtube_ff_id":"C09ujoXAnWg","session_youtube_ff_link":"https://youtu.be/C09ujoXAnWg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/__o_dwELzN8&t=0h19m5s","sessions":["Short Papers: Scientific and Immersive Visualization"],"time_stamp":"2024-10-16T16:18:00Z","title":"Investigating the Apple Vision Pro Spatial Computing Platform for GPU-Based Volume Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1155","abstract":"Augmented reality (AR) area labels can visualize real world regions with arbitrary boundaries and show invisible objects or features. But environment conditions such as lighting and clutter can decrease fixed or passive label visibility, and labels that have high opacity levels can occlude crucial details in the environment. We design and evaluate active AR area label visualization modes to enhance visibility across real-life environments, while still retaining environment details within the label. For this, we define a distant characteristic color from the environment in perceptual CIELAB space, then introduce spatial variations among label pixel colors based on the underlying environment variation. 
In a user study with 18 participants, we found that our active label visualization modes can be comparable in visibility to a fixed green baseline by Gabbard et al., and can outperform it with added spatial variation in cluttered environments, across varying levels of lighting (e.g., nighttime), and in environments with colors similar to the fixed baseline color.","accessible_pdf":false,"authors":[{"affiliations":["Brown University, Providence, United States"],"email":"hojung_kwon@brown.edu","is_corresponding":true,"name":"Hojung Kwon"},{"affiliations":["Brown University, Providence, United States"],"email":"yuanbo_li@brown.edu","is_corresponding":false,"name":"Yuanbo Li"},{"affiliations":["Brown University, Providence, United States"],"email":"chloe_ye2019@hotmail.com","is_corresponding":false,"name":"Xiaohan Ye"},{"affiliations":["Brown University, Providence, United States"],"email":"praccho_muna-mcquay@brown.edu","is_corresponding":false,"name":"Praccho Muna-McQuay"},{"affiliations":["Duke University, Durham, United States"],"email":"liuren.yin@duke.edu","is_corresponding":false,"name":"Liuren Yin"},{"affiliations":["Brown University, Providence, United States"],"email":"james_tompkin@brown.edu","is_corresponding":false,"name":"James Tompkin"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1155","image_caption":"Top left: If an AR area label has a similar color to the environment, we cannot easily see the label. Top right: If the label is too opaque, it occludes the environment. Bottom left: We automatically change label colors to increase visibility. Bottom right: We add spatial variation within a label to reduce background occlusion. (Background image source: Dubai360, 8K 360 Degree Timelapse of Dubai Marina) ","keywords":["Augmented reality, active labels, environment-adaptive"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1155/v-short-1155_Preview.mp4?token=dpGKqhvLa6sMi3Edw8lp-dvqWiXCdIARUWf0TC-HX84&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1155/v-short-1155_Preview.srt?token=iYyTyQyGsTVOYivhq8Te7feVuQfNyhsknqjkGJdIT5k&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Scientific and Immersive Visualization","session_uid":"v-short","session_youtube_ff_id":"O978Fqk58Fw","session_youtube_ff_link":"https://youtu.be/O978Fqk58Fw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/__o_dwELzN8&t=1h4m35s","sessions":["Short Papers: Scientific and Immersive Visualization"],"time_stamp":"2024-10-16T17:03:00Z","title":"Active Appearance and Spatial Variation Can Improve Visibility in Area Labels for Augmented Reality","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1183","abstract":"An atmospheric front is an imaginary surface that separates two distinct air masses and is commonly defined as the warm-air side of a frontal zone with high gradients of atmospheric temperature and humidity (Fig. 1, left). 
These fronts are a widely used conceptual model in meteorology, which are often encountered in the literature as two-dimensional (2D) front lines on surface analysis charts. This paper presents a method for computing three-dimensional (3D) atmospheric fronts as surfaces that is capable of extracting continuous and well-confined features suitable for 3D visual analysis, spatio- temporal tracking, and statistical analyses (Fig. 1, middle, right). Recently developed contour-based methods for 3D front extraction rely on computing the third derivative of a moist potential temperature field. Additionally, they require the field to be smoothed to obtain continuous large-scale structures. This paper demonstrates the feasibility of an alternative method to front extraction using ridge surface computation. The proposed method requires only the second derivative of the input field and produces accurate structures even from unsmoothed data. An application of the ridge-based method to a data set corresponding to Cyclone Friederike demonstrates its benefits and utility towards visual analysis of the full 3D structure of fronts.","accessible_pdf":false,"authors":[{"affiliations":["Zuse Institute Berlin, Berlin, Germany"],"email":"anne.gossing@fu-berlin.de","is_corresponding":true,"name":"Anne Gossing"},{"affiliations":["Universit\u00e4t Hamburg, Hamburg, Germany"],"email":"andreas.beckert@uni-hamburg.de","is_corresponding":false,"name":"Andreas Beckert"},{"affiliations":["Universit\u00e4t Hamburg, Hamburg, Germany"],"email":"christoph.fischer-1@uni-hamburg.de","is_corresponding":false,"name":"Christoph Fischer"},{"affiliations":["Zuse Institute Berlin, Berlin, Germany"],"email":"klenert@zib.de","is_corresponding":false,"name":"Nicolas Klenert"},{"affiliations":["Indian Institute of Science, Bangalore, India"],"email":"vijayn@iisc.ac.in","is_corresponding":false,"name":"Vijay Natarajan"},{"affiliations":["Freie Universit\u00e4t Berlin, Berlin, Germany"],"email":"george.pacey@fu-berlin.de","is_corresponding":false,"name":"George Pacey"},{"affiliations":["Universit\u00e4t Hamburg, Hamburg, Germany"],"email":"thorwin.vogt@uni-hamburg.de","is_corresponding":false,"name":"Thorwin Vogt"},{"affiliations":["Universit\u00e4t Hamburg, Hamburg, Germany"],"email":"marc.rautenhaus@uni-hamburg.de","is_corresponding":false,"name":"Marc Rautenhaus"},{"affiliations":["Zuse Institute Berlin, Berlin, Germany"],"email":"baum@zib.de","is_corresponding":false,"name":"Daniel Baum"}],"award":"honorable","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1183","image_caption":"Atmospheric fronts play a significant role in mid-latitude weather dynamics and are responsible for 50% - and locally up to 90% - of extreme precipitation. To support visual analysis of frontal processes, in this paper we present a ridge-based approach for the extraction and visualization of three-dimensional atmospheric fronts. Current contour-based visualization techniques require data smoothing that can lead to local inaccuracies, whereas our ridge detection algorithm extracts fronts as continuous surfaces without smoothing. This preserves the original data resolution, thereby facilitating the investigation of small-scale processes in frontal environments. 
","keywords":["Atmospheric front, ridge surface, visual analysis."],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1183/v-short-1183_Preview.mp4?token=07DX41XUOHJ0wsi8xtxI1j34lYN1AbYb3Uz6Cb4WH70&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Scientific and Immersive Visualization","session_uid":"v-short","session_youtube_ff_id":"G6iZGuhjBf4","session_youtube_ff_link":"https://youtu.be/G6iZGuhjBf4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/__o_dwELzN8&t=0h9m31s","sessions":["Short Papers: Scientific and Immersive Visualization"],"time_stamp":"2024-10-16T16:09:00Z","title":"A Ridge-based Approach for Extraction and Visualization of 3D Atmospheric Fronts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1211","abstract":"Transfer function design is crucial in volume rendering, as it directly influences the visual representation and interpretation of volumetric data. However, creating effective transfer functions that align with users' visual objectives is often challenging due to the complex parameter space and the semantic gap between transfer function values and features of interest within the volume. In this work, we propose a novel approach that leverages recent advancements in language-vision models to bridge this semantic gap. By employing a fully differentiable rendering pipeline and an image-based loss function guided by language descriptions, our method generates transfer functions that yield volume-rendered images closely matching the user's intent. We demonstrate the effectiveness of our approach in creating meaningful transfer functions from simple descriptions, empowering users to intuitively express their desired visual outcomes with minimal effort. This advancement streamlines the transfer function design process and makes volume rendering more accessible to a wider range of users.","accessible_pdf":true,"authors":[{"affiliations":["Vanderbilt University, Nashville, United States"],"email":"sangwon.jeong@vanderbilt.edu","is_corresponding":true,"name":"Sangwon Jeong"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"jixianli@sci.utah.edu","is_corresponding":false,"name":"Jixian Li"},{"affiliations":["Lawrence Livermore National Laboratory , Livermore, United States"],"email":"shusenl@sci.utah.edu","is_corresponding":false,"name":"Shusen Liu"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"crj@sci.utah.edu","is_corresponding":false,"name":"Chris R. Johnson"},{"affiliations":["Vanderbilt University, Nashville, United States"],"email":"matthew.berger@vanderbilt.edu","is_corresponding":false,"name":"Matthew Berger"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1211","image_caption":"A gallery of volume renderings found using Text-2-Transfer Function method. 
Our method can produce transfer functions focusing on various visual properties such as color, material, or abstract concepts such as \u201ccinematic.\u201d","keywords":["Transfer function design, vision-language model"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2406.15634","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1211/v-short-1211_Preview.mp4?token=Dxy0WyVTUlGxl3ru8PfJoSVnbTBqd4KZ9Cm8gWcfLCk&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1211/v-short-1211_Preview.srt?token=zX4eGNJo_w-PwbJLOF9-awAxMGW1LNJqLUpOiqCvdig&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Scientific and Immersive Visualization","session_uid":"v-short","session_youtube_ff_id":"wtl-zKpboLg","session_youtube_ff_link":"https://youtu.be/wtl-zKpboLg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/__o_dwELzN8&t=0h46m21s","sessions":["Short Papers: Scientific and Immersive Visualization"],"time_stamp":"2024-10-16T16:45:00Z","title":"Text-based transfer function design for semantic volume rendering","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1292","abstract":"Collaborative planning for congenital heart diseases typically involves creating physical heart models through 3D printing, which are then examined by both surgeons and cardiologists. Recent developments in mobile augmented reality (AR) technologies have presented a viable alternative, known for their ease of use and portability. However, there is still a lack of research examining the utilization of multi-user mobile AR environments to support collaborative planning for cardiovascular surgeries. We created ARCollab, an iOS AR app designed for enabling multiple surgeons and cardiologists to interact with a patient's 3D heart model in a shared environment. ARCollab enables surgeons and cardiologists to import heart models, manipulate them through gestures and collaborate with other users, eliminating the need for fabricating physical heart models. Our evaluation of ARCollab's usability and usefulness in enhancing collaboration, conducted with three cardiothoracic surgeons and two cardiologists, marks the first human evaluation of a multi-user mobile AR tool for surgical planning. 
ARCollab is open-source, available at https://github.com/poloclub/arcollab.","accessible_pdf":true,"authors":[{"affiliations":["Georgia Tech, Atlanta, United States"],"email":"pratham.mehta001@gmail.com","is_corresponding":true,"name":"Pratham Darrpan Mehta"},{"affiliations":["Georgia Tech, Atlanta, United States"],"email":"rnarayanan39@gatech.edu","is_corresponding":false,"name":"Rahul Ozhur Narayanan"},{"affiliations":["Georgia Tech, Atlanta, United States"],"email":"harsha5431@gmail.com","is_corresponding":false,"name":"Harsha Karanth"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"alexanderyang@gatech.edu","is_corresponding":false,"name":"Haoyang Yang"},{"affiliations":["Emory University, Atlanta, United States"],"email":"slesnickt@kidsheart.com","is_corresponding":false,"name":"Timothy C Slesnick"},{"affiliations":["Emory University/Children's Healthcare of Atlanta, Atlanta, United States"],"email":"fawwaz.shaw@choa.org","is_corresponding":false,"name":"Fawwaz Shaw"},{"affiliations":["Georgia Tech, Atlanta, United States"],"email":"polo@gatech.edu","is_corresponding":false,"name":"Duen Horng (Polo) Chau"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1292","image_caption":"ARCollab is a collaborative cardiovascular surgical planning application in mobile augmented reality. Multiple users can join a shared session and view a patient's 3D heart model from different perspectives. ARCollab allows surgeons and cardiologists to collaboratively interact with a 3D heart model in real-time. Our evaluation of ARCollab's usability and usefulness in enhancing collaboration, conducted with three cardiothoracic surgeons and two cardiologists, marks the first human evaluation of a multi-user mobile AR tool for surgical planning. ARCollab is open-source, available at https://github.com/poloclub/arcollab. 
","keywords":["Augmented Reality, Mobile Collaboration, Surgical Planning"],"open_access_supplemental_link":"https://github.com/poloclub/arcollab","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.03249","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1292/v-short-1292_Preview.mp4?token=JCa98Nvp5ZJr4ODeYdcidV_gsxCoxlhfyo11Q6ciMY0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1292/v-short-1292_Preview.srt?token=qGpinD0qCxaulavLG2bfshSvMgq5n_bKTX0HAb2kwAs&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Scientific and Immersive Visualization","session_uid":"v-short","session_youtube_ff_id":"iZMV5ADTBO4","session_youtube_ff_link":"https://youtu.be/iZMV5ADTBO4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/__o_dwELzN8&t=0h56m10s","sessions":["Short Papers: Scientific and Immersive Visualization"],"time_stamp":"2024-10-16T16:54:00Z","title":"Multi-User Mobile Augmented Reality for Cardiovascular Surgical Planning","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1059","abstract":"Gantt charts are a widely-used idiom for visualizing temporal discrete event sequence data where dependencies exist between events. They are popular in domains such as manufacturing and computing for their intuitive layout of such data. However, these domains frequently generate data at scales which tax both the visual representation and the ability to render it at interactive speeds. To aid visualization developers who use Gantt charts in these situations, we develop a task taxonomy of low level visualization tasks supported by Gantt charts and connect them to the data queries needed to support them. Our taxonomy is derived through a literature survey of visualizations using Gantt charts over the past 30 years.","accessible_pdf":true,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"sayefsakin@sci.utah.edu","is_corresponding":true,"name":"Sayef Azad Sakin"},{"affiliations":["The University of Utah, Salt Lake City, United States"],"email":"kisaacs@sci.utah.edu","is_corresponding":false,"name":"Katherine E. Isaacs"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1059","image_caption":"Gantt charts are popular in project planning, process scheduling, and progress tracking for visualizing interdependent temporal event sequences. Typically, data is organized by temporal order on one axis and the other by grouping events with relevant factors. Our literature-based visualization task taxonomy helps in designing Gantt charts with large number of events by aligning prevalent visual tasks with relevant data queries. 
These provide a foundation for identifying and developing data management strategies to scale up visual interactivity in Gantt Charts.","keywords":["Gantt chart\u2014Visualization\u2014Task taxonomy"],"open_access_supplemental_link":"https://osf.io/8k79r/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.04050","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1059/v-short-1059_Preview.mp4?token=AA-7sRm2dRmgNqzQ9NbGsKAYKGiJVgGMXQzn8vK3ySo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1059/v-short-1059_Preview.srt?token=iIARTsHoNlynFZ_r5Ktv12EVvzBs79LzQgF7UYJ4jCg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short6","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Perception and Representation","session_uid":"v-short","session_youtube_ff_id":"a85BN_1AgEE","session_youtube_ff_link":"https://youtu.be/a85BN_1AgEE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Dx83B4g1W5A&t=0h45m30s","sessions":["Short Papers: Perception and Representation"],"time_stamp":"2024-10-17T13:15:00Z","title":"A Literature-based Visualization Task Taxonomy for Gantt Charts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1072","abstract":"Recent advancements in vision models have greatly improved their ability to handle complex chart understanding tasks, like chart captioning and question answering. However, it remains challenging to assess how these models process charts. Existing benchmarks only roughly evaluate model performance without evaluating the underlying mechanisms, such as how models extract image embeddings. This limits our understanding of the model's ability to perceive fundamental graphical components. To address this, we introduce a novel evaluation framework to assess the graphical perception of image embedding models. For chart comprehension, we examine two main aspects of channel effectiveness: accuracy and discriminability of various visual channels. Channel accuracy is assessed through the linearity of embeddings, measuring how well the perceived magnitude aligns with the size of the stimulus. Discriminability is evaluated based on the distances between embeddings, indicating their distinctness. Our experiments with the CLIP model show that it perceives channel accuracy differently from humans and shows unique discriminability in channels like length, tilt, and curvature. 
We aim to develop this work into a broader benchmark for reliable visual encoders, enhancing models for precise chart comprehension and human-like perception in future applications.","accessible_pdf":false,"authors":[{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"dtngus0111@gmail.com","is_corresponding":true,"name":"Soohyun Lee"},{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"jangsus1@snu.ac.kr","is_corresponding":false,"name":"Minsuk Chang"},{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"shpark@hcil.snu.ac.kr","is_corresponding":false,"name":"Seokhyeon Park"},{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"jseo@snu.ac.kr","is_corresponding":false,"name":"Jinwook Seo"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1072","image_caption":"An image showing how differently the image embedding model perceives changes in different visual channels. Peaks represent thresholds where the model perceives significant differences between images, indicating the discriminability of each channel.","keywords":["Graphical perception, channel effectiveness, image embeddings, clip"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2407.20845","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1072/v-short-1072_Preview.mp4?token=oWDGK7OGFYWlhpETPpd7Cs6OTMqdpsTMLkMzxcdeBcE&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short6","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Perception and Representation","session_uid":"v-short","session_youtube_ff_id":"1o5g7_3J40g","session_youtube_ff_link":"https://youtu.be/1o5g7_3J40g","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Dx83B4g1W5A&t=0h26m40s","sessions":["Short Papers: Perception and Representation"],"time_stamp":"2024-10-17T12:57:00Z","title":"Assessing Graphical Perception of Image Embedding Models using Channel Effectiveness","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1081","abstract":"Sine illusion happens when the more quickly changing pairs of lines lead to bigger underestimates of the delta between them.We evaluate three visual manipulations on mitigating sine illusions: dotted lines, aligned gridlines, and offset gridlines via a user study. We asked participants to compare the deltas between two lines at two time points and found aligned gridlines to be the most effective in mitigating sine illusions.Using data from the user study, we produced a model that predicts the impact of the sine illusion in line charts by accounting for the ratio of the vertical distance between the two points of comparison. When the ratio is less than 50\\%, participants begin to be influenced by the sine illusion. 
This effect can be significantly exacerbated when the difference between the two deltas falls under 30\\%.We compared two explanations for the sine illusion based on our data: either participants were mistakenly using the perpendicular distance between the two lines to make their comparison (the perpendicular explanation), or they incorrectly relied on the length of the line segment perpendicular to the angle bisector of the bottom and top lines (the equal triangle explanation). We found the equal triangle explanation to be the more predictive model explaining participant behaviors.","accessible_pdf":true,"authors":[{"affiliations":["Google LLC, San Francisco, United States"],"email":"cknit1999@gmail.com","is_corresponding":false,"name":"Clayton J Knittel"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"jawuah3@gatech.edu","is_corresponding":false,"name":"Jane Awuah"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"franconeri@northwestern.edu","is_corresponding":false,"name":"Steven L Franconeri"},{"affiliations":["Georgia Tech, Atlanta, United States"],"email":"cxiong@gatech.edu","is_corresponding":true,"name":"Cindy Xiong Bearfield"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1081","image_caption":"Looking at this visualization of two lines depicting the revenue of two products over time. Product A is consistently doing better than Product B, and thus have higher revenue throughout time. Both products' revenue are growing, with their line slopes increasing over time. Your task it to compare whether the difference between their revenue, or the deltas between the two lines, are bigger at an earlier time (Time 1), or a later time (Time 2). While it may be tempting to say the difference is bigger at Time 1, the correct answer is Time 2. This is a visual illusion commonly referred to as the sine illusion. 
It is an underestimation of the difference between two lines when both lines have increasing slopes.","keywords":["sine illusion, gridlines, perception, bias, thresholds"],"open_access_supplemental_link":"https://osf.io/kq87n/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"http://arxiv.org/abs/2408.00854","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1081/v-short-1081_Preview.mp4?token=YKdYyRVKX0qJ9XXs4G9WUwY_svqHkLGqf694bT-Kj5M&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1081/v-short-1081_Preview.srt?token=jRuND1m9uPluMnSvV5z9FR-91RLjE2qTAMQ5uA0zIN0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short6","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Perception and Representation","session_uid":"v-short","session_youtube_ff_id":"siQIBjM26Wg","session_youtube_ff_link":"https://youtu.be/siQIBjM26Wg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Dx83B4g1W5A&t=1h5m25s","sessions":["Short Papers: Perception and Representation"],"time_stamp":"2024-10-17T13:33:00Z","title":"Gridlines Mitigate Sine Illusion in Line Charts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1109","abstract":"Homophily refers to the tendency of individuals to associate with others who are similar to them in characteristics, such as, race, ethnicity, age, gender, or interests. In this paper, we investigate if individuals exhibit racial homophily when viewing visualizations, using mass shooting data in the United States as the example topic. We conducted a crowdsourced experiment (N=450) where each participant was shown a visualization displaying the counts of mass shooting victims, highlighting the counts for one of three racial groups (White, Black, or Hispanic). Participants were assigned to view visualizations highlighting their own race or a different race to assess the influence of racial concordance on changes in affect (emotion) and attitude towards gun control. While we did not find evidence of homophily, the results showed a significant negative shift in affect across all visualization conditions. Notably, political ideology significantly impacted changes in affect, with more liberal views correlating with a more negative affect change. 
Our findings underscore the complexity of reactions to mass shooting visualizations and suggest that future research should consider various methodological improvements to better assess homophily effects.","accessible_pdf":false,"authors":[{"affiliations":["New York University, Brooklyn, United States"],"email":"pt2393@nyu.edu","is_corresponding":true,"name":"Poorna Talkad Sukumar"},{"affiliations":["New York University, Brooklyn, United States"],"email":"mporfiri@nyu.edu","is_corresponding":false,"name":"Maurizio Porfiri"},{"affiliations":["New York University, New York, United States"],"email":"onov@nyu.edu","is_corresponding":false,"name":"Oded Nov"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1109","image_caption":"One of the three conditions used in our experiment consisting of a bar chart of the counts of victims in mass shootings in the United States from 2013 to 2023, highlighting the counts of Hispanic victims. The other two conditions consist of the same bar chart but highlight the counts of White and Black victims, respectively. ","keywords":["Visualization; Journalism; Mass shootings; Race; Homophily"],"open_access_supplemental_link":"https://osf.io/3crqx/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/pdf/2408.03269","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1109/v-short-1109_Preview.mp4?token=rHi1UHfRawEcazbN1rhXYIgginvswT3o2SwjyJkh0tg&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short6","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Perception and Representation","session_uid":"v-short","session_youtube_ff_id":"5MyW9ssiG3s","session_youtube_ff_link":"https://youtu.be/5MyW9ssiG3s","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Dx83B4g1W5A&t=0h36m45s","sessions":["Short Papers: Perception and Representation"],"time_stamp":"2024-10-17T13:06:00Z","title":"Connections Beyond Data: Exploring Homophily With Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1116","abstract":"Visualizations support rapid analysis of scientific datasets, allowing viewers to glean aggregate information (e.g., the mean) within split-seconds. While prior research has explored this ability in conventional charts, it is unclear if spatial visualizations used by computational scientists afford a similar ensemble perception capacity. We investigate people's ability to estimate two summary statistics, mean and variance, from pseudocolor scalar fields. In a crowdsourced experiment, we find that participants can reliably characterize both statistics, although variance discrimination requires a much stronger signal. Multi-hue and diverging colormaps outperformed monochromatic, luminance ramps in aiding this extraction. Analysis of qualitative responses suggests that participants often estimate the distribution of hotspots and valleys as visual proxies for data statistics. 
These findings suggest that people's summary interpretation of spatial datasets is likely driven by the appearance of discrete color segments, rather than assessments of overall luminance. Implicit color segmentation in quantitative displays could thus prove more useful than previously assumed by facilitating quick, gist-level judgments about color-coded visualizations.","accessible_pdf":false,"authors":[{"affiliations":["Argonne National Laboratory, Lemont, United States"],"email":"vmateevitsi@anl.gov","is_corresponding":false,"name":"Victor A. Mateevitsi"},{"affiliations":["Argonne National Laboratory, Lemont, United States","University of Illinois Chicago, Chicago, United States"],"email":"papka@anl.gov","is_corresponding":false,"name":"Michael E. Papka"},{"affiliations":["Indiana University, Indianapolis, United States"],"email":"redak@iu.edu","is_corresponding":false,"name":"Khairi Reda"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1116","image_caption":"We studied whether people can rapidly perceive two ensemble statistics from scalar fields: the mean and variation. The figure illustrates the experimental procedures we used to evaluate this capacity.","keywords":["Ensemble perception, colormaps, scalar fields"],"open_access_supplemental_link":"https://osf.io/h8mn2/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2406.14452","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1116/v-short-1116_Preview.mp4?token=-2B8GvKPTbcGmjcSkOpRvnNnMWzdgzCHMnvQ4MO_vKg&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1116/v-short-1116_Preview.srt?token=k8x_VR5-N0DudnoKoiYijSup6XtSz5uG2wKefJzoKC4&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short6","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Perception and Representation","session_uid":"v-short","session_youtube_ff_id":"lIUx96SZ0N4","session_youtube_ff_link":"https://youtu.be/lIUx96SZ0N4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Dx83B4g1W5A&t=0h10m2s","sessions":["Short Papers: Perception and Representation"],"time_stamp":"2024-10-17T12:39:00Z","title":"Science in a Blink: Supporting Ensemble Perception in Scalar Fields","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1184","abstract":"To improve the perception of hierarchical structures in data sets, several color map generation algorithms have been proposed to take this structure into account. But the design of hierarchical color maps elicits different requirements to those of color maps for tabular data. Within this paper, we make an initial effort to put design rules from the color map literature into the context of hierarchical color maps. We investigate the impact of several design decisions and provide recommendations for various analysis scenarios. 
Thus, we lay the foundation for objective quality criteria to evaluate hierarchical color maps.","accessible_pdf":true,"authors":[{"affiliations":["Fraunhofer IGD, Darmstadt, Germany"],"email":"tobias.mertz@igd.fraunhofer.de","is_corresponding":true,"name":"Tobias Mertz"},{"affiliations":["Fraunhofer IGD, Darmstadt, Germany","TU Darmstadt, Darmstadt, Germany"],"email":"joern.kohlhammer@igd.fraunhofer.de","is_corresponding":false,"name":"J\u00f6rn Kohlhammer"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1184","image_caption":"The results of three different configurations of the popular Tree Colors algorithm for generating hierarchical color maps. The configurations produce color maps with different characteristics that are suitable for different analysis scenarios. Within this paper, we investigate the impact of six different design rules on hierarchical color map design in different analysis scenarios, to be able to decide which configuration suits our scenarios best.","keywords":["Guidelines, Color, Graph/Network and Tree Data."],"open_access_supplemental_link":"https://arxiv.org/abs/2407.08287","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2407.08287","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1184/v-short-1184_Preview.mp4?token=bLQpPzptN0CYuUR8xEdKMS11SJ_f0XXAboOPiNdryYE&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short6","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Perception and Representation","session_uid":"v-short","session_youtube_ff_id":"jtKTnjVQ_wQ","session_youtube_ff_link":"https://youtu.be/jtKTnjVQ_wQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Dx83B4g1W5A&t=0h19m0s","sessions":["Short Papers: Perception and Representation"],"time_stamp":"2024-10-17T12:48:00Z","title":"Towards a Quality Approach to Hierarchical Color Maps","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1274","abstract":"This study examines the impact of positive and negative contrast polarities (i.e., light and dark modes) on the performance of younger adults and people in their late adulthood (PLA). In a crowdsourced study with 134 participants (69 below age 60, 66 aged 60 and above), we assessed their accuracy and time performing analysis tasks across three common visualization types (Bar, Line, Scatterplot) and two contrast polarities (positive and negative). We observed that, across both age groups, the polarity that led to better performance and the resulting amount of improvement varied on an individual basis, with each polarity benefiting comparable proportions of participants. However, the contrast polarity that led to better performance did not always match their preferred polarity. Additionally, we observed that the choice of contrast polarity can have an impact on time similar to that of the choice of visualization type, resulting in an average percent difference of around 36%. These findings indicate that, overall, the effects of contrast polarity on visual analysis performance do not noticeably change with age. 
Furthermore, they underscore the importance of making visualizations available in both contrast polarities to better support a broad audience with differing needs. Supplementary materials for this work can be found at https://osf.io/539a4/.","accessible_pdf":false,"authors":[{"affiliations":["University of Massachusetts Amherst, Amherst, United States"],"email":"zwhile@cs.umass.edu","is_corresponding":true,"name":"Zack While"},{"affiliations":["University of Massachusetts Amherst, Amherst, United States"],"email":"asarv@cs.umass.edu","is_corresponding":false,"name":"Ali Sarvghad"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1274","image_caption":"Two rows of data visualizations, each row consisting of 3 visualizations: a scatterplot, bar chart, and line chart, respectively. The top row uses positive contrast, also known as light mode, while the bottom row uses negative contrast, also known as dark mode.","keywords":["people in late adulthood, GerontoVis, data visualization, contrast polarity"],"open_access_supplemental_link":"https://osf.io/539a4","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1274/v-short-1274_Preview.mp4?token=oJ4pXma3alci88ApsX1vKqwlwbRciFJVinzLFrhA09I&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1274/v-short-1274_Preview.srt?token=9gTOBheCSX7NlhM3wl2VgFcitssodWZdQxSagZEIqXQ&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short6","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Perception and Representation","session_uid":"v-short","session_youtube_ff_id":"--dzVG5Ti8w","session_youtube_ff_link":"https://youtu.be/--dzVG5Ti8w","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Dx83B4g1W5A&t=0h0m35s","sessions":["Short Papers: Perception and Representation"],"time_stamp":"2024-10-17T12:30:00Z","title":"Dark Mode or Light Mode? Exploring the Impact of Contrast Polarity on Visualization Performance Between Age Groups","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1301","abstract":"\"Reactionary delay\" is a result of the accumulated cascading effects of knock-on train delays which is increasing on UK railways due to increasing utilisation of the railway infrastructure. The chaotic nature of its effects on train lateness is notoriously hard to predict. We use a stochastic Monte-Carlo-style simulation of reactionary delay that produces whole distributions of likely reactionary delay and delays this causes. 
We demonstrate how Zoomable Level-of-Detail ChartTables - case-by-variable tables where cases are rows, variables are columns, variables are complex composite metrics that incorporate distributions, and cells contain mini-charts that depict these as different levels of detail through zoom interaction - help interpret whole distributions of model outputs to help understand the causes and effects of reactionary delay, how they inform timetable robustness testing, and how they could be used in other contexts.","accessible_pdf":false,"authors":[{"affiliations":["City, University of London, London, United Kingdom"],"email":"a.slingsby@city.ac.uk","is_corresponding":true,"name":"Aidan Slingsby"},{"affiliations":["Risk Solutions, Warrington, United Kingdom"],"email":"jonathan.hyde@risksol.co.uk","is_corresponding":false,"name":"Jonathan Hyde"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1301","image_caption":"A Zoomable Level-of-Detail ChartTable, in which train delay metrics (columns) are represented as mini-charts for each train (row).","keywords":["Level-of-detail, mini-charts, distributions, stochastic modelling."],"open_access_supplemental_link":"https://osf.io/u2ykd/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"http://arxiv.org/abs/2408.01203","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1301/v-short-1301_Preview.mp4?token=BMyADGaR-JCzamFZSa7_nHAFd0sZ2xMO1MoowOJ3Ui8&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short6","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Perception and Representation","session_uid":"v-short","session_youtube_ff_id":"oBxNVn63rEM","session_youtube_ff_link":"https://youtu.be/oBxNVn63rEM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Dx83B4g1W5A&t=0h55m10s","sessions":["Short Papers: Perception and Representation"],"time_stamp":"2024-10-17T13:24:00Z","title":"Zoomable Level-of-Detail ChartTables for Interpreting Probabilistic Model Outputs for Reactionary Train Delays","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1062","abstract":"Annotations are a critical component of visualizations, helping viewers interpret the visual representation and highlighting critical data insights. Despite their significant role, we lack an understanding of how annotations can be incorporated into other data representations, such as physicalizations and sonifications. Given the emergent nature of these representations, sonifications, and physicalizations lack formalized conventions (e.g., design space, vocabulary) that can introduce challenges for audiences to interpret the intended data encoding. To address this challenge, this work focuses on how annotations can be more tightly integrated into the design process of creating sonifications and physicalizations. In an exploratory study with 13 designers, we explore how visualization annotation techniques can be adapted to sonic and physical modalities. 
Our work highlights how annotations for sonification and physicalizations are inseparable from their data encodings.","accessible_pdf":false,"authors":[{"affiliations":["Whitman College, Walla Walla, United States"],"email":"sorensor@whitman.edu","is_corresponding":false,"name":"Rhys Sorenson-Graff"},{"affiliations":["University of Colorado Boulder, Boulder, United States"],"email":"sandra.bae@colorado.edu","is_corresponding":false,"name":"S. Sandra Bae"},{"affiliations":["Whitman College, Walla Walla, United States"],"email":"wirfsbro@colorado.edu","is_corresponding":true,"name":"Jordan Wirfs-Brock"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1062","image_caption":"Examples of geometric annotations used in a visualization, sonification, and physicalization. Geometric annotations draw attention to a specific section of the data representation, providing additional context, detail, and clarity to a section if it contains crucial information or is of significant interest to the viewer. Visualizations can integrate geometric annotations with call-out boxes. Sonifications can highlight specific excerpts using sub-clips of audio. Physicalizations can present multiple frames of reference to emphasize different perspectives that zoom in and out of the physicalization (photo credit to Klauss et al.)","keywords":["Annotations, physicalization, sonification"],"open_access_supplemental_link":"https://osf.io/wu6g9/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.04574","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1062/v-short-1062_Preview.mp4?token=fZ4-LL2x7Xo3IEq6PbGThmlQ6hE3if6_RtUivcv_1qc&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short7","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Text and Multimedia","session_uid":"v-short","session_youtube_ff_id":"ANwzcGZYe8E","session_youtube_ff_link":"https://youtu.be/ANwzcGZYe8E","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/VJvUj8FNBsU&t=0h47m56s","sessions":["Short Papers: Text and Multimedia"],"time_stamp":"2024-10-17T15:00:00Z","title":"Integrating Annotations into the Design Process for Sonifications and Physicalizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1068","abstract":"Integrating textual content, such as titles, annotations, and captions, with visualizations facilitates comprehension and takeaways during data exploration. Yet current tools often lack mechanisms for integrating meaningful long-form prose with visual data. This paper introduces DASH, a bimodal data exploration tool that supports integrating semantic levels into the interactive process of visualization and text-based analysis. DASH operationalizes a modified version of Lundgard et al.\u2019s semantic hierarchy model that categorizes data descriptions into four levels ranging from basic encodings to high-level insights. 
By leveraging this structured semantic level framework and a large language model\u2019s text generation capabilities, DASH enables the creation of data-driven narratives via drag-and-drop user interaction. Through a preliminary user evaluation, we discuss the utility of DASH\u2019s text and chart integration capabilities when participants perform data exploration with the tool.","accessible_pdf":true,"authors":[{"affiliations":["Tableau Research, Seattle, United States"],"email":"bromley.denny@gmail.com","is_corresponding":true,"name":"Dennis Bromley"},{"affiliations":["Tableau Research, Palo Alto, United States"],"email":"vsetlur@tableau.com","is_corresponding":false,"name":"Vidya Setlur"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1068","image_caption":"DASH is an interactive bimodal data analysis system that facilitates drag-and-drop analysis between text and visual representations of data. Users can expand on chart marks or text phrases by dragging them to DASH\u2019s text region, or drill down into them by dragging them to DASH\u2019s chart region. Using a modified Lundgard et al semantic hierarchy, DASH helps users create data analyses that combine high-level insights with low-level supporting visualizations.","keywords":["Semantic levels, LLMs, text generation"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.01011","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1068/v-short-1068_Preview.mp4?token=ofxmxp8U-50dbgsNLkj35JCykoRs7C8S6IINtrBo8p4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1068/v-short-1068_Preview.srt?token=HsSn0QkGmegjSHB8PhO6Wjuk5OUpxFRfasDCsQLE7Ww&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short7","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Text and Multimedia","session_uid":"v-short","session_youtube_ff_id":"3Jlkw_OKzlE","session_youtube_ff_link":"https://youtu.be/3Jlkw_OKzlE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/VJvUj8FNBsU&t=0h9m45s","sessions":["Short Papers: Text and Multimedia"],"time_stamp":"2024-10-17T14:24:00Z","title":"DASH: A Bimodal Data Exploration Tool for Interactive Text and Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1078","abstract":"Data visualizations are reaching global audiences. As people who use Right-to-left (RTL) scripts constitute over a billion potential data visualization users, a need emerges to investigate how visualizations are communicated to them. Web design guidelines exist to assist designers in adapting different reading directions, yet we lack a similar standard for visualization design. This paper investigates the design patterns of visualizations with RTL scripts. We collected 128 visualizations from data-driven articles published in Arabic news outlets and analyzed their chart composition, textual elements, and sources. Our analysis suggests that designers tend to apply RTL approaches more frequently for categorical data. 
In other situations, we observed a mix of Left-to-right (LTR) and RTL approaches for chart directions and structures, sometimes inconsistently utilized within the same article. We reflect on this lack of clear guidelines for RTL data visualizations and derive implications for visualization authoring tools and future research directions.","accessible_pdf":true,"authors":[{"affiliations":["University College London, London, United Kingdom","UAE University , Al Ain, United Arab Emirates"],"email":"muna.alebri.19@ucl.ac.uk","is_corresponding":true,"name":"Muna Alebri"},{"affiliations":["Worcester Polytechnic Institute, Worcester, United States"],"email":"ntrakotondravony@wpi.edu","is_corresponding":false,"name":"No\u00eblle Rakotondravony"},{"affiliations":["Worcester Polytechnic Institute, Worcester, United States"],"email":"ltharrison@wpi.edu","is_corresponding":false,"name":"Lane Harrison"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1078","image_caption":"Data visualizations from two articles available in Arabic and other left-to-right languages. The bar chart shows categorical data points that are non-ordinal (source: Inkyfada). The line chart shows ordered data points, its x-axis represents time sequence. Both charts are mirrored and their orientation follows the direction of the article language, i.e. from right to left for Arabic and left to right for English. The position of the logo of the journal, and the mention of the data source are also mirrored when switching between visualization in RTL and LTR languages.","keywords":["Design Patterns, Right-To-Left Visualizations, Data Journalism"],"open_access_supplemental_link":"https://rdr.ucl.ac.uk/articles/dataset/Code_book_of_RTL_visualization_in_Arabic_News_media/26150749/1","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://discovery.ucl.ac.uk/id/eprint/10194127/","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1078/v-short-1078_Preview.mp4?token=CnVCQt3ox-J30yoeusQPdACfd-PZQs0bBldoBUnHbqM&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1078/v-short-1078_Preview.srt?token=U7RTz016QX5hdE33XJ4y3nco8gC6-cpdsMTjLcL7ziY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short7","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Text and Multimedia","session_uid":"v-short","session_youtube_ff_id":"87XnPiyYb1U","session_youtube_ff_link":"https://youtu.be/87XnPiyYb1U","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/VJvUj8FNBsU&t=0h0m40s","sessions":["Short Papers: Text and Multimedia"],"time_stamp":"2024-10-17T14:15:00Z","title":"Design Patterns in Right-to-Left Visualizations: The Case of Arabic Content","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1079","abstract":"Image datasets serve as the foundation for machine learning models in computer vision, significantly influencing model capabilities, performance, and biases alongside architectural considerations. Therefore, understanding the composition and distribution of these datasets has become increasingly crucial. 
To address the need for intuitive exploration of these datasets, we propose AEye, an extensible and scalable visualization tool tailored to image datasets. AEye utilizes a contrastively trained model to embed images into semantically meaningful high-dimensional representations, facilitating data clustering and organization. To visualize the high-dimensional representations, we project them onto a two-dimensional plane and arrange images in layers so users can seamlessly navigate and explore them interactively. AEye facilitates semantic search functionalities for both text and image queries, enabling users to search for content. We open-source the codebase for AEye, and provide a simple configuration to add datasets. ","accessible_pdf":true,"authors":[{"affiliations":["ETH Zurich, Zurich, Switzerland"],"email":"fgroetschla@ethz.ch","is_corresponding":true,"name":"Florian Gr\u00f6tschla"},{"affiliations":["ETH Zurich, Zurich, Switzerland"],"email":"lanzendoerfer@ethz.ch","is_corresponding":false,"name":"Luca A Lanzend\u00f6rfer"},{"affiliations":["ETH Zurich, Zurich, Switzerland"],"email":"mcalzavara@student.ethz.ch","is_corresponding":false,"name":"Marco Calzavara"},{"affiliations":["ETH Zurich, Zurich, Switzerland"],"email":"wattenhofer@ethz.ch","is_corresponding":false,"name":"Roger Wattenhofer"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1079","image_caption":"Overview of the AEye interface. Images are positioned according to their location in the CLIP embedding space and arranged in layers that the user can navigate by zooming. Top left: Dataset selector, Top middle: Search bar for semantic text and image search. Top right: Show information about the application. Bottom right: Minimap of the embedding space. ","keywords":["Image embeddings, image visualization, contrastive learning, semantic search."],"open_access_supplemental_link":"https://github.com/ETH-DISCO/aeye","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.04072","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1079/v-short-1079_Preview.mp4?token=n3u7b786e-vMYMvE_IQ5U9cjS2frtaZk02wVGgfo0KE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1079/v-short-1079_Preview.srt?token=hFQcl6SIBRsQTGVASl2lxRII2B4p0D9G-Bu6d5HgxoI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short7","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Text and Multimedia","session_uid":"v-short","session_youtube_ff_id":"JdTXigyYkkw","session_youtube_ff_link":"https://youtu.be/JdTXigyYkkw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/VJvUj8FNBsU&t=0h56m23s","sessions":["Short Papers: Text and Multimedia"],"time_stamp":"2024-10-17T15:09:00Z","title":"AEye: A Visualization Tool for Image Datasets","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1100","abstract":"Confidence scores of automatic speech recognition (ASR) outputs are often inadequately communicated, preventing its seamless integration into analytical workflows. 
In this paper, we introduce Confides, a visual analytic system developed in collaboration with intelligence analysts to address this issue. Confides aims to aid exploration and post-AI-transcription editing by visually representing the confidence associated with the transcription. We demonstrate how our tool can assist intelligence analysts who use ASR outputs in their analytical and exploratory tasks and how it can help mitigate misinterpretation of crucial information. We also discuss opportunities for improving textual data cleaning and model transparency for human-machine collaboration.","accessible_pdf":true,"authors":[{"affiliations":["Washington University in St. Louis, St. Louis, United States"],"email":"sha@wustl.edu","is_corresponding":true,"name":"Sunwoo Ha"},{"affiliations":["Washington University in St. Louis, St. Louis, United States"],"email":"chaelim@wustl.edu","is_corresponding":false,"name":"Chaehun Lim"},{"affiliations":["Smith College, Northampton, United States"],"email":"jcrouser@smith.edu","is_corresponding":false,"name":"R. Jordan Crouser"},{"affiliations":["Washington University in St. Louis, St. Louis, United States"],"email":"alvitta@wustl.edu","is_corresponding":false,"name":"Alvitta Ottley"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1100","image_caption":"Overview of Confides: (a) The collapsible side menu contains controls for selecting, uploading, and transcribing audio files. (b) At the top of the dashboard are the audio player and search bar. (c) The confidence overview displays the length and average confidence value of each line segment in the transcription (encoded by the width and opacity of each rectangle, respectively). (d) The word tree provides context to a specific search term and shows which words most often follow or precede it. 
(e) The user can view and edit the transcription; each word is underlined, and its opacity indicates the confidence score.","keywords":["Visual analytics, confidence visualization, automatic speech recognition"],"open_access_supplemental_link":"https://github.com/washuvis/vis2024confides","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2405.00223","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1100/v-short-1100_Preview.mp4?token=MEgqXbErF2uaQUneqbipGJAuaMBY8pOPLCDzbdEDAtU&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short7","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Text and Multimedia","session_uid":"v-short","session_youtube_ff_id":"tBOVI_-pLQ4","session_youtube_ff_link":"https://youtu.be/tBOVI_-pLQ4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/VJvUj8FNBsU&t=0h39m10s","sessions":["Short Papers: Text and Multimedia"],"time_stamp":"2024-10-17T14:51:00Z","title":"Confides: A Visual Analytics Solution for Automated Speech Recognition Analysis and Exploration","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1144","abstract":"Reconstruction of 3D scenes from 2D images is a technical challenge that impacts domains from Earth and planetary sciences and space exploration to augmented and virtual reality. Typically, reconstruction algorithms first identify common features across images and then minimize reconstruction errors after estimating the shape of the terrain. This bundle adjustment (BA) step optimizes around a single, simplifying scalar value that obfuscates many possible causes of reconstruction errors (e.g., initial estimate of the position and orientation of the camera, lighting conditions, ease of feature detection in the terrain). Reconstruction errors can lead to inaccurate scientific inferences or endanger a spacecraft exploring a remote environment. To address this challenge, we present VECTOR, a visual analysis tool that improves error inspection for stereo reconstruction BA. VECTOR provides analysts with previously unavailable visibility into feature locations, camera pose, and computed 3D points. VECTOR was developed in partnership with the Perseverance Mars Rover and Ingenuity Mars Helicopter terrain reconstruction team at the NASA Jet Propulsion Laboratory. 
We report on how this tool was used to debug and improve terrain reconstruction for the Mars 2020 mission.","accessible_pdf":false,"authors":[{"affiliations":["Northeastern University, Boston, United States"],"email":"racquel.fygenson@gmail.com","is_corresponding":true,"name":"Racquel Fygenson"},{"affiliations":["Weta FX, Auckland, New Zealand"],"email":"kjawad@andrew.cmu.edu","is_corresponding":false,"name":"Kazi Jawad"},{"affiliations":["Art Center, Pasadena, United States"],"email":"zongzhanisabelli@gmail.com","is_corresponding":false,"name":"Zongzhan Li"},{"affiliations":["California Institute of Technology, Pasadena, United States"],"email":"francois.ayoub@jpl.nasa.gov","is_corresponding":false,"name":"Francois Ayoub"},{"affiliations":["California Institute of Technology, Pasadena, United States"],"email":"bob.deen@jpl.nasa.gov","is_corresponding":false,"name":"Robert G Deen"},{"affiliations":["California Institute of Technology, Pasadena, United States"],"email":"sd@scottdavidoff.com","is_corresponding":false,"name":"Scott Davidoff"},{"affiliations":["Carnegie Mellon University, Pittsburgh, United States"],"email":"domoritz@cmu.edu","is_corresponding":false,"name":"Dominik Moritz"},{"affiliations":["NASA-JPL, Pasadena, United States"],"email":"mauricio.a.hess.flores@jpl.nasa.gov","is_corresponding":false,"name":"Mauricio Hess-Flores"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1144","image_caption":"We present VECTOR, software that visualizes 3D reconstruction error for easier comprehension and more informed input modification. VECTOR consists of image views that superimpose residual error vectors on top of input images and 3-dimensional camera views that show spatially how multiple images are calibrated by a reconstruction algorithm to render a 3D output.","keywords":["Computer vision, stereo image processing, optimization, error analysis, uncertainty, SLAM, SfM, robotics"],"open_access_supplemental_link":"https://github.com/NASA-AMMOS/VECTOR","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.03503","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1144/v-short-1144_Preview.mp4?token=YSmFa8I-T48gXtFd-NX4Gy8_zyKOP8lr0uTaoqgXJNw&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1144/v-short-1144_Preview.srt?token=D8vJ9rJe-6EA1yRWkHrW_f_MVYtiaPG9wnNc3zKr4F0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short7","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Text and Multimedia","session_uid":"v-short","session_youtube_ff_id":"M97VBVFg46E","session_youtube_ff_link":"https://youtu.be/M97VBVFg46E","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/VJvUj8FNBsU&t=1h5m7s","sessions":["Short Papers: Text and Multimedia"],"time_stamp":"2024-10-17T15:18:00Z","title":"Opening the Black Box of 3D Reconstruction Error Analysis with VECTOR","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1236","abstract":"Automatically generating data visualizations in response to human utterances on datasets necessitates a deep semantic understanding of the 
utterance, including implicit and explicit references to data attributes, visualization tasks, and necessary data preparation steps. Natural Language Interfaces (NLIs) for data visualization have explored ways to infer such information, yet challenges persist due to inherent uncertainty in human speech. Recent advances in Large Language Models (LLMs) provide an avenue to address these challenges, but their ability to extract the relevant semantic information remains unexplored. In this study, we evaluate four publicly available LLMs (GPT-4, Gemini-Pro, Llama3, and Mixtral), investigating their ability to comprehend utterances even in the presence of uncertainty and identify the relevant data context and visual tasks. Our findings reveal that LLMs are sensitive to uncertainties in utterances. Despite this sensitivity, they are able to extract the relevant data context. However, LLMs struggle with inferring visualization tasks. Based on these results, we highlight future research directions on using LLMs for visualization generation. Our supplementary materials have been shared on GitHub: https://github.com/hdi-umd/Semantic_Profiling_LLM_Evaluation.","accessible_pdf":true,"authors":[{"affiliations":["University of Maryland, College Park, United States"],"email":"hbako@umd.edu","is_corresponding":true,"name":"Hannah K. Bako"},{"affiliations":["University of Maryland, College Park, United States"],"email":"arshnoorbhutani8@gmail.com","is_corresponding":false,"name":"Arshnoor Bhutani"},{"affiliations":["The University of Texas at Austin, Austin, United States"],"email":"xinyi.liu@utexas.edu","is_corresponding":false,"name":"Xinyi Liu"},{"affiliations":["University of Maryland, College Park, United States"],"email":"kcobbina@cs.umd.edu","is_corresponding":false,"name":"Kwesi Adu Cobbina"},{"affiliations":["University of Maryland, College Park, United States"],"email":"leozcliu@umd.edu","is_corresponding":false,"name":"Zhicheng Liu"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1236","image_caption":"The image presents a study evaluating the semantic profiling abilities of large language models (LLMs) for natural language utterances in data visualization tasks, analyzing clarity, data context extraction, and task classification across 500 utterances and 37 datasets.","keywords":["Human-centered computing\u2014Visualization\u2014Empirical studies in visualization;"],"open_access_supplemental_link":"https://github.com/hdi-umd/Semantic_Profiling_LLM_Evaluation/","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2407.06129","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1236/v-short-1236_Preview.mp4?token=I3UvrII-G1uCgR91evMvz1wVxYcuroghU4YBe1mKrSc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1236/v-short-1236_Preview.srt?token=dfnnBvj8b0vGXpsosgMgxryg0CSmgulBZEyMgaOeWyI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short7","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Text and 
Multimedia","session_uid":"v-short","session_youtube_ff_id":"hZQ9TFfCsvM","session_youtube_ff_link":"https://youtu.be/hZQ9TFfCsvM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/VJvUj8FNBsU&t=0h19m26s","sessions":["Short Papers: Text and Multimedia"],"time_stamp":"2024-10-17T14:33:00Z","title":"Evaluating the Semantic Profiling Abilities of LLMs for Natural Language Utterances in Data Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1276","abstract":"Machine Learning models for chart-grounded Q&A (CQA) often treat charts as images, but performing CQA on pixel values has proven challenging. We thus investigate a resource overlooked by current ML-based approaches: the declarative documents describing how charts should visually encode data (i.e., chart specifications). In this work, we use chart specifications to enhance language models (LMs) for chart-reading tasks, such that the resulting system can robustly understand language for CQA. Through a case study with 359 bar charts, we test novel fine tuning schemes on both GPT-3 and T5 using a new dataset curated for two CQA tasks: question-answering and visual explanation generation. Our text-only approaches strongly outperform vision-based GPT-4 on explanation generation (99% vs. 63% accuracy), and show promising results for question-answering (57-67% accuracy). Through in-depth experiments, we also show that our text-only approaches are mostly robust to natural language variation.","accessible_pdf":true,"authors":[{"affiliations":["Adobe Research, San Jose, United States"],"email":"victorbursztyn2022@u.northwestern.edu","is_corresponding":false,"name":"Victor S. Bursztyn"},{"affiliations":["Adobe Research, Seattle, United States"],"email":"jhoffs@adobe.com","is_corresponding":true,"name":"Jane Hoffswell"},{"affiliations":["Adobe Research, San Jose, United States"],"email":"sguo@adobe.com","is_corresponding":false,"name":"Shunan Guo"},{"affiliations":["Adobe Research, San Jose, United States"],"email":"eunyee@adobe.com","is_corresponding":false,"name":"Eunyee Koh"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1276","image_caption":"We explore two main tasks related to chart-grounded Q&A: question answering (QA) and visual explanation generation (VEG). QA leverages templated domain facts (DF) from the chart's CSV file, whereas VEG relies on visual context (VC) from its JSON file. In the first fine-tuning step, the charts' underlying text files are injected into the language models (LMs). We then fine-tune the QA and VEG steps on 90% of the charts, with 10% held out for testing during our evaluation in \u00a74. 
To understand the robustness of our LMs to natural language variation, we also perform a question paraphrasing task to rephrase our template-generated questions more naturally.","keywords":["Machine Learning Techniques; Charts, Diagrams, and Plots; Datasets; Computational Benchmark Studies"],"open_access_supplemental_link":"https://github.com/vbursztyn/charts-as-text-for-chartqa","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1276/v-short-1276_Preview.mp4?token=uk4Bi8iMPydutIRUB_RlyQm3_UIvGt5EwfIIOdsPMtI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1276/v-short-1276_Preview.srt?token=KgNukVsaW0mEY5605DEVIRE1wJQblZjvqbDrrUKxGBk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short7","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Text and Multimedia","session_uid":"v-short","session_youtube_ff_id":"m9owYC9e3PU","session_youtube_ff_link":"https://youtu.be/m9owYC9e3PU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/VJvUj8FNBsU&t=0h30m12s","sessions":["Short Papers: Text and Multimedia"],"time_stamp":"2024-10-17T14:42:00Z","title":"Representing Charts as Text for Language Models: An In-Depth Study of Question Answering for Bar Charts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1363","abstract":"Data visualization aids in making data analysis more intuitive and in-depth, with widespread applications in fields such as biology, finance, and medicine. For massive and continuously growing streaming time series data, these data are typically visualized in the form of line charts, but the data transmission puts significant pressure on the network, leading to visualization lag or even failure to render completely. This paper proposes a universal sampling algorithm FPCS, which retains feature points from continuously received streaming time series data, compensates for the frequent fluctuating feature points, and aims to achieve efficient visualization. This algorithm bridges the gap in sampling for streaming time series data. 
The algorithm has several advantages: (1) It optimizes the sampling results by compensating for fewer feature points, retaining the visualization features of the original data very well, ensuring high-quality sampled data; (2) The execution time is the shortest compared to similar existing algorithms; (3) It has an almost negligible space overhead; (4) The data sampling process does not depend on the overall data; (5) This algorithm can be applied to infinite streaming data and finite static data.","accessible_pdf":false,"authors":[{"affiliations":["China Nanhu Academy of Electronics and Information Technology(CNAEIT), JiaXing, China"],"email":"3271961659@qq.com","is_corresponding":true,"name":"Hongyan Li"},{"affiliations":["China Nanhu Academy of Electronics and Information Technology(CNAEIT), JiaXing, China"],"email":"ustcboy@outlook.com","is_corresponding":false,"name":"Bo Yang"},{"affiliations":["China Nanhu Academy of Electronics and Information Technology, Jiaxing, China"],"email":"caiyansong@cnaeit.com","is_corresponding":false,"name":"Yansong Chua"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1363","image_caption":"The FPCS algorithm is used to sample streaming time series data. Each row corresponds to one of the five typical datasets. Columns 1-4 represent the visualization fitting effect of the first 100,000 data points in these datasets using the newly proposed FPCS and the other three algorithms, based on a 100:1 sampling ratio. The red line represents original data points; the green line represents sampled data points. Column 5 uses SSIM to compare the sampling effects of the four algorithms based on sampling ratios of 100:1, 200:1, 500:1, and 1000:1. The FPCS algorithm shows the best sampling results and performance.","keywords":["Data visualization, Massive, Streaming, Time series, Line charts, Sampling, Feature, Compensating"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1363/v-full-1363_Preview.mp4?token=0gXWRmcCMfl6L3ip3HixMsJXLSVigQa_bEzROzFp0No&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1363/v-full-1363_Preview.srt?token=EazpHxQWh4ZjsjNz-jgkfAVPiCtu1Z_KAYxozQRJBp8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"virtual1","session_room":"Palma Ceia I ","session_room_id":"palmaceia1","session_title":"Virtual: VIS from around the world","session_uid":"v-full","session_youtube_ff_id":"TAD1E6fAMHU","session_youtube_ff_link":"https://youtu.be/TAD1E6fAMHU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KgA-HGs0_4s&t=0h0m34s","sessions":["Virtual: VIS from around the world"],"time_stamp":"2024-10-16T12:30:00Z","title":"FPCS: Feature Preserving Compensated Sampling of Streaming Time Series Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1708","abstract":"The widespread use of Deep Neural Networks (DNNs) has recently resulted in their application to challenging scientific visualization tasks. 
While advanced DNNs demonstrate impressive generalization abilities, understanding factors like prediction quality, confidence, robustness, and uncertainty is crucial. These insights aid application scientists in making informed decisions. However, DNNs lack inherent mechanisms to measure prediction uncertainty, prompting the creation of distinct frameworks for constructing robust uncertainty-aware models tailored to various visualization tasks. In this work, we develop uncertainty-aware implicit neural representations to model steady-state vector fields effectively. We comprehensively evaluate the efficacy of two principled deep uncertainty estimation techniques: (1) Deep Ensemble and (2) Monte Carlo Dropout, aimed at enabling uncertainty-informed visual analysis of features within steady vector field data. Our detailed exploration using several vector data sets indicate that uncertainty-aware models generate informative visualization results of vector field features. Furthermore, incorporating prediction uncertainty improves the resilience and interpretability of our DNN model, rendering it applicable for the analysis of non-trivial vector field data sets.","accessible_pdf":true,"authors":[{"affiliations":["Indian Institute of Technology Kanpur , Kanpur, India"],"email":"atulkrfcb@gmail.com","is_corresponding":true,"name":"Atul Kumar"},{"affiliations":["Indian Institute of Technology Kanpur , Kanpur , India"],"email":"gsiddharth2209@gmail.com","is_corresponding":false,"name":"Siddharth Garg"},{"affiliations":["Indian Institute of Technology Kanpur (IIT Kanpur), Kanpur, India"],"email":"soumya.cvpr@gmail.com","is_corresponding":false,"name":"Soumya Dutta"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1708","image_caption":"Uncertainty-aware implicit neural representation learning of vector field data. 
This proposed method enables neural network-guided uncertainty-informed visual analytics of vector fields by estimating the prediction uncertainty associated with the predicted values, aiming to build trustworthy and robust neural representations of complex vector data.","keywords":["Implicit Neural Network, Uncertainty, Monte Carlo Dropout, Deep Ensemble, Vector Field, Visualization, Deep Learning."],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://doi.org/10.48550/arXiv.2407.16119","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1708/v-full-1708_Preview.mp4?token=RuwNr0_Oj-7iuGFhpcoszwyfx6b8OX43V7VsYW4Jp-I&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1708/v-full-1708_Preview.srt?token=4tcbOL6zqtOUnLdOq4amQGeiATXilSco48Y5QL9epKM&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"virtual1","session_room":"Palma Ceia I ","session_room_id":"palmaceia1","session_title":"Virtual: VIS from around the world","session_uid":"v-full","session_youtube_ff_id":"vEf-mNcR5M0","session_youtube_ff_link":"https://youtu.be/vEf-mNcR5M0","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KgA-HGs0_4s&t=0h11m49s","sessions":["Virtual: VIS from around the world"],"time_stamp":"2024-10-16T12:42:00Z","title":"Uncertainty-Aware Deep Neural Representations for Visual Analysis of Vector Field Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1101","abstract":"Color coding, a technique assigning specific colors to cluster information types, has proven advantages in aiding human cognitive activities, especially reading and comprehension. The rise of Large Language Models (LLMs) has streamlined document coding, enabling simple automatic text labeling with various schemes. This has the potential to make color-coding more accessible and benefit more users. However, the impact of color choice on information seeking is understudied. We conducted a user study assessing various color schemes\u2019 effectiveness in LLM-coded text documents, standardizing contrast ratios to approximately 5.55:1 across schemes. Participants performed timed information-seeking tasks in color-coded scholarly abstracts. Results showed non-analogous and yellow-inclusive color schemes improved performance, with the latter also being more preferred by participants. These findings can inform better color scheme choices for text annotation. 
As LLMs advance document coding, we advocate for more research focusing on the \u201ccolor\u201d aspect of color-coding techniques.","accessible_pdf":false,"authors":[{"affiliations":["Pennsylvania State University, University Park, United States"],"email":"samnghoyin@gmail.com","is_corresponding":true,"name":"Ho Yin Ng"},{"affiliations":["Pennsylvania State University, University Park, United States"],"email":"zmh5268@psu.edu","is_corresponding":false,"name":"Zeyu He"},{"affiliations":["Pennsylvania State University, University Park , United States"],"email":"txh710@psu.edu","is_corresponding":false,"name":"Ting-Hao Kenneth Huang"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1101","image_caption":"The left figure shows the 10 color schemes used in our user study, generated by combining cool (Red, Yellow) and warm (Green, Blue) colors as base colors. These schemes are categorized into groups for analysis. The right figure shows the study result that yellow-inclusive schemes are more effective for information seeking tasks, yielding higher accuracy and lower response times compared to other color schemes.","keywords":["Color, Color coding, Information seeking, Text visualization, Document."],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1101/v-short-1101_Preview.mp4?token=klJe8UgVF0yzVDoDzhM_SYu91sIQDbFcUnsgTB_t7rU&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"virtual1","session_room":"Palma Ceia I ","session_room_id":"palmaceia1","session_title":"Virtual: VIS from around the world","session_uid":"v-short","session_youtube_ff_id":"NbqkrDofSUs","session_youtube_ff_link":"https://youtu.be/NbqkrDofSUs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KgA-HGs0_4s&t=0h24m23s","sessions":["Virtual: VIS from around the world"],"time_stamp":"2024-10-16T12:54:00Z","title":"What Color Scheme is More Effective in Assisting Readers to Locate Information in a Color-Coded Article?","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1199","abstract":"In the digital landscape, the ubiquity of data visualizations in media underscores the necessity for accessibility to ensure inclusivity for all users, including those with visual impairments. Current visual content often fails to cater to the needs of screen reader users due to the absence of comprehensive textual descriptions. To address this gap, we propose in this paper a framework designed to empower media content creators to transform charts into descriptive narratives. This tool not only facilitates the understanding of complex visual data through text but also fosters a broader awareness of accessibility in digital content creation. Through the application of this framework, users can interpret and convey the insights of data visualizations more effectively, accommodating a diverse audience. 
Our evaluations reveal that this tool not only enhances the comprehension of data visualizations but also promotes new perspectives on the represented data, thereby broadening the interpretative possibilities for all users.","accessible_pdf":false,"authors":[{"affiliations":["Polytechnique Montr\u00e9al, Montr\u00e9al, Canada"],"email":"qiangxu1204@gmail.com","is_corresponding":true,"name":"Qiang Xu"},{"affiliations":["Polytechnique Montreal, Montreal, Canada"],"email":"thomas.hurtut@polymtl.ca","is_corresponding":false,"name":"Thomas Hurtut"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1199","image_caption":"Main interface with three components: A. List of features in input chart, B. Generated descriptions of selected features, and C. Input chart itself. The list of features includes dropdowns for variable selection, and the generated descriptions are interactively linked to the chart.","keywords":["Accessibility, chart text description."],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1199/v-short-1199_Preview.mp4?token=5UBKUTE7Fb4bntXLAbrFS0R6oeGXY8VcYqM22wFtWUI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1199/v-short-1199_Preview.srt?token=T5rn1XHfcNwElDoSMYngG8YVQBPVUf38dslvgAwHuhM&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"virtual1","session_room":"Palma Ceia I ","session_room_id":"palmaceia1","session_title":"Virtual: VIS from around the world","session_uid":"v-short","session_youtube_ff_id":"9PS0vl2THtI","session_youtube_ff_link":"https://youtu.be/9PS0vl2THtI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KgA-HGs0_4s&t=0h35m50s","sessions":["Virtual: VIS from around the world"],"time_stamp":"2024-10-16T13:03:00Z","title":"From Graphs to Words: A Computer-Assisted Framework for the Production of Accessible Text Descriptions","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1207","abstract":"An essential task of an air traffic controller is to manage the traffic flow by predicting future trajectories. Complex traffic patterns are difficult to predict and manage and impose cognitive load on the air traffic controllers. In this work we present an interactive visual analytics interface which facilitates detection and resolution of complex traffic patterns for air traffic controllers. The interface supports air traffic controllers in detecting complex clusters of aircraft and further enables them to visualize and simultaneously compare how different re-routing strategies for each individual aircraft yield reduction of complexity in the entire sector for the next hour. 
The development of the concepts was supported by the domain-specific feedback we received from six fully licensed and operational air traffic controllers in an iterative design process over a period of 14 months.","accessible_pdf":false,"authors":[{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden","Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"elmira.zohrevandi@liu.se","is_corresponding":true,"name":"Elmira Zohrevandi"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden","Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"katerina.vrotsou@liu.se","is_corresponding":false,"name":"Katerina Vrotsou"},{"affiliations":["Institute of Science and Technology, Norrk\u00f6ping, Sweden","Institute of Science and Technology, Norrk\u00f6ping, Sweden"],"email":"carl.westin@liu.se","is_corresponding":false,"name":"Carl A. L. Westin"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden","Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"jonas.lundberg@liu.se","is_corresponding":false,"name":"Jonas Lundberg"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden","Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"anders.ynnerman@liu.se","is_corresponding":false,"name":"Anders Ynnerman"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1207","image_caption":"The designed focus+context composite glyph aims to facilitate resolution of complex traffic patterns for air traffic controllers. The complexity resolutions are integrated with the conflict resolution glyph. The blue and red plots depict cluster complexity variations with heading and speed changes for a selected aircraft.","keywords":["Visual analytics, Visualization design, Safety-critical systems"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1207/v-short-1207_Preview.mp4?token=cIsnQXbhp_0uoaxDRqO-zLY7uifEQm61gs0IhBotL2Q&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1207/v-short-1207_Preview.srt?token=0sbOYWqtb9L6yYldqdedkFmlEbSGpgfQBGaJ2RdAGSo&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"virtual1","session_room":"Palma Ceia I ","session_room_id":"palmaceia1","session_title":"Virtual: VIS from around the world","session_uid":"v-short","session_youtube_ff_id":"L6BdVBUeOno","session_youtube_ff_link":"https://youtu.be/L6BdVBUeOno","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KgA-HGs0_4s&t=0h44m25s","sessions":["Virtual: VIS from around the world"],"time_stamp":"2024-10-16T13:12:00Z","title":"Design of a Real-Time Visual Analytics Decision Support Interface to Manage Air Traffic Complexity","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1277","abstract":"Trust is a subjective yet fundamental component of human-computer interaction, and is a determining factor in shaping the efficacy of data visualizations. 
Prior research has identified five dimensions of trust assessment in visualizations (credibility, clarity, reliability, familiarity, and confidence), and observed that these dimensions tend to vary predictably along with certain features of the visualization being evaluated. This raises a further question: how do the design features driving viewers' trust assessment vary with the characteristics of the viewers themselves? By reanalyzing data from these studies through the lens of individual differences, we build a more detailed map of the relationships between design features, individual characteristics, and trust behaviors. In particular, we model the distinct contributions of endogenous design features (such as visualization type, or the use of color) and exogenous user characteristics (such as visualization literacy), as well as the interactions between them. We then use these findings to make recommendations for individualized and adaptive visualization design.","accessible_pdf":true,"authors":[{"affiliations":["Smith College, Northampton, United States"],"email":"jcrouser@smith.edu","is_corresponding":false,"name":"R. Jordan Crouser"},{"affiliations":["Smith College, Northampton, United States"],"email":"cmatoussi@smith.edu","is_corresponding":true,"name":"Syrine Matoussi"},{"affiliations":["Smith College, Northampton, United States"],"email":"ekung@smith.edu","is_corresponding":false,"name":"Lan Kung"},{"affiliations":["Washington University in St. Louis, St. Louis, United States"],"email":"p.saugat@wustl.edu","is_corresponding":false,"name":"Saugat Pandey"},{"affiliations":["Washington University in St. Louis, St. Louis, United States"],"email":"m.oen@wustl.edu","is_corresponding":false,"name":"Oen G McKinley"},{"affiliations":["Washington University in St. Louis, St. 
Louis, United States"],"email":"alvitta@wustl.edu","is_corresponding":false,"name":"Alvitta Ottley"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1277","image_caption":"A recursive partitioning approach to identifying exogenous and endogenous predictors of trust behavior.","keywords":["Trust, data visualization, individual differences, personality"],"open_access_supplemental_link":"https://osf.io/k5tzr/","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.03800","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1277/v-short-1277_Preview.mp4?token=0jxSIKCsT42OZsAZs8zWAJQy6w8sb7p_2uNXfJQr1F0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1277/v-short-1277_Preview.srt?token=YrMUfzToeptnVTQS9X7MQOBjfabmYa3bWVtZOqDRv7c&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"virtual1","session_room":"Palma Ceia I ","session_room_id":"palmaceia1","session_title":"Virtual: VIS from around the world","session_uid":"v-short","session_youtube_ff_id":"cBfXDjQRmaM","session_youtube_ff_link":"https://youtu.be/cBfXDjQRmaM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KgA-HGs0_4s&t=0h52m20s","sessions":["Virtual: VIS from around the world"],"time_stamp":"2024-10-16T13:21:00Z","title":"Building and Eroding: Exogenous and Endogenous Factors that Influence Subjective Trust in Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1055","abstract":"Data is moving beyond the scientific community, flooding communication channels and addressing issues of importance to all aspects of daily life. This highlights the need for rich and expressive data representations to communicate the science on which society rests and on which society must act. However, current visualization techniques often lack the broad visual vocabulary needed to accommodate the explosion in data scale, diversity and audience perspectives. While previous work has mined artistic and design knowledge for color maps and shape affordances (glyphs) in visualization, line encoding has received little attention. In this paper we report on an exploration of visual properties that extend the vocabulary of the line, particularly for categorical encoding. We describe the creation of a corpus of lines motivated by artistic practice, Gestalt theory, and design principles, and present initial results from a study of how different visual properties influence how people associate these into sets of similar lines. 
While very preliminary, the findings suggest that a rich set of line attributes will support both association and categorical hierarchies, as well as provoke further inquiry into how and why line encoding can be more expressive in encoding multivariate, multidimensional data.","accessible_pdf":false,"authors":[{"affiliations":["University of Texas at Austin, Austin, United States"],"email":"fsamsel@tacc.utexas.edu","is_corresponding":true,"name":"Francesca Samsel"},{"affiliations":["Simon Fraser University, Surrey, Canada"],"email":"lyn@sfu.ca","is_corresponding":false,"name":"Lyn Bartram"},{"affiliations":["University of Texas at Austin, Austin, United States"],"email":"gda@tacc.utexas.edu","is_corresponding":false,"name":"Greg Abram"},{"affiliations":["University of Texas, Texas Advanced Computing Center, Austin, United States"],"email":"adb@tacc.utexas.edu","is_corresponding":false,"name":"Anne Bowen"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1055","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visap1","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Papers","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Papers"],"time_stamp":"2024-10-16T14:15:00Z","title":"What\u2019s My Line? Exploring the Expressive Capacity of Lines in Scientific Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1102","abstract":"In the realm of human-computer interaction, AI interactive systems aim to foster connections and understanding among users further, deepening the communication between humans and machines as well as among multiple individuals. However, this paper highlights that current studies have neglected the media and philosophical dimensions, culminating in an interactive system named the 'Humanity Test.' \"Humanity\" refers to emotions and consciousness, while \"test\" signifies a critical study of AI technology and an exploration of the distinctions between humanity and technicality. Furthermore, based on a review of related literature, we argue that the focus of AI system research is shifting, with electroencephalogram (EEG) data becoming a trend in AI system integration. Collecting and analyzing experimental data, we identified three design directions: enhancing immersive experiences, creating emotional experiences, and expressing ideas. The experiment results indicate that integrating EEG data into AI systems markedly improves participants' immersive and emotional experiences. This integration not only promotes a deeper understanding of the human-machine boundary but also encourages empathic interactions among users. 
Based on these findings, EEG data as a medium shows a promising potential to enrich interactive experiences, providing new insights into integrating technology with human emotions.","accessible_pdf":false,"authors":[{"affiliations":["College of Design and Innovation, Tongji University, Shanghai, China"],"email":"373895198@qq.com","is_corresponding":true,"name":"Fang Fang"},{"affiliations":["College of Design and Innovation, Shanghai, China"],"email":"tanhaogao@gmail.com","is_corresponding":false,"name":"Tanhao Gao"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1102","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visap1","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Papers","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Papers"],"time_stamp":"2024-10-16T14:25:00Z","title":"Humanity Test - EEG Data Mediated Artificial Intelligence Multiplayer Interactive System","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1044","abstract":"As advanced technology reshapes our perception, the dialogue between humans and the universe undergoes a transformative shift. Understanding this transformation can help us think about how humanity is headed in the future. To illustrate this dialogue shift, we propose the creation of a spatial art installation that embodies the revolution in dialogue. Drawing on interdisciplinary research and methodologies spanning anthropology, philosophy, astronomy, acoustics, computer science, and nomadic traditional singing, we embark on a transformative journey. Using artistic language, this work juxtaposes the most advanced astronomical observation practices of humanity with the ancient nomadic tradition of conversing with the cosmos. Specifically, it engages in a dialogue between the astronomical data from the James Webb Space Telescope and the throat-singing tradition of Khoomei. Subsequently, the work models the propagation of these sounds in three-dimensional space and materializes them into tangible entities. 
By immersing observers in the spatial representation of this dialogue, we offer a profound experience of evolving dialogue between human and the universe within the fluidity of spacetime.","accessible_pdf":false,"authors":[{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"],"email":"ywang342@connect.hkust-gz.edu.cn","is_corresponding":true,"name":"Fiona You Wang"},{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"],"email":"anijiati587@connect.hkust-gz.edu.cn","is_corresponding":false,"name":"Joshua Nijiati Alimujiang"},{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"],"email":"wohinwu@gmail.com","is_corresponding":false,"name":"Violet Wei Wu"},{"affiliations":["Washington University in St.Louis, St.Louis, United States"],"email":"liu.rose@wustl.edu","is_corresponding":false,"name":"Rose Yiwei Liu"},{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"],"email":"kzhangcma@hkust-gz.edu.cn","is_corresponding":false,"name":"Kang Zhang"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1044","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visap1","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Papers","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Papers"],"time_stamp":"2024-10-16T14:50:00Z","title":"Spacetime Dialogue: Integrating Astronomical Data and Khoomei in Spatial Installation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1052","abstract":"Data visualization is often associated with efficiency and the production of insights. However, visual artworks that utilize data as their artistic medium, often referred to as data art or artistic visualizations, receive less attention, especially in discussions surrounding exhibitions specifically focused on data visualization. Artistic visualization is typically presented and debated at conferences on data visualization and related areas in computing and design, usually involving an exhibition of works in parallel. While there are established exhibitions in electronic art, collective exhibitions focused on artistic data visualization, especially those independent of academic events, remain rare. Additionally, there is a limited amount of literature regarding the curatorial practice of specifically artistic data visualization exhibitions. This paper aims to contribute with the discussion of the curatorial processes behind two artistic data visualization exhibitions, Numerical Existence and Numerical Existence: Emergencies, held in Rio de Janeiro in 2018 and 2024, respectively. 
We will present a brief overview of curatorial attributes, identify the most common issues addressed in exhibitions dedicated to data visualization curated in artistic contexts, discuss the role and unique challenges of curatorial practice in this field, and share insights from our curatorial experience with two exhibitions. Furthermore, we will propose future directions for research and practice in the curation of artistic data visualization. Through this exploration, we aim to contribute to the curatorial practice of artistic data visualization, providing reflections and recommendations to enhance the development of this emerging field.","accessible_pdf":false,"authors":[{"affiliations":["Federal University of Rio de Janeiro, Rio de Janeiro, Brazil","Pontifical Catholic University of Rio de Janeiro, Rio de Janeiro, Brazil"],"email":"luiztorresludwig@gmail.com","is_corresponding":false,"name":"Luiz Ludwig"},{"affiliations":["Rio de Janeiro State University, Rio de Janeiro, Brazil"],"email":"bcastro@esdi.uerj.br","is_corresponding":false,"name":"Barbara Castro"},{"affiliations":["Universidade Federal do Rio de Janeiro, Rio de Janeiro, Brazil"],"email":"doriskos@eba.ufrj.br","is_corresponding":true,"name":"Doris Kosminsky"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1052","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visap1","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Papers","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Papers"],"time_stamp":"2024-10-16T15:00:00Z","title":"Numerical Existence: Reflections on Curating Artistic Data Visualization Exhibitions","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1082","abstract":"This article introduces an artistic research project that utilises artist-in-residency and exhibition as methods for exploring the possibilities of robotic 3D printing and ceramics. The interdisciplinary project unites artists and architects to collaborate on a proposed curatorial concept and Do-It-With-Others (DIWO) technological development. Constraints include material, specifically local clay, production technique, namely 3D printing with a robotic arm, and kiln size, as well as an exhibition concept that is further elaborated in the next chapter. The pictorial presents four projects as case studies demonstrating how the creatives integrate these constraints into their processes. This integration leads to the subsequent refinement and customization of the robotic-ceramics interface, aligning with the practitioners' requirements through software development. 
The project's focus extends beyond artistic outcomes, aiming also to advance the pipeline of 3D robotic printing in clay, employing a digitally controlled material press that has been developed in-house, with its functionality refined through practice.","accessible_pdf":false,"authors":[{"affiliations":["Academy of Media Arts Cologne, Cologne, Germany"],"email":"varvarag@gmail.com","is_corresponding":true,"name":"Varvara Guljajeva"},{"affiliations":["Tallinn University, Tallinn, Estonia","Academy of Media Arts Cologne, Cologne, Germany"],"email":"mar.canet@gmail.com","is_corresponding":false,"name":"Mar Canet Sola"},{"affiliations":["Estonian Academy of Arts, Tallinn, Estonia"],"email":"lauri.kilusk@artun.ee","is_corresponding":false,"name":"Lauri Kilusk"},{"affiliations":["Estonian Academy of Arts, Tallinn, Estonia"],"email":"martin.melioranski@artun.ee","is_corresponding":false,"name":"Martin Melioranski"},{"affiliations":["Estonian Academy of Arts, Tallinn, Estonia"],"email":"kaiko.kivi@artun.ee","is_corresponding":false,"name":"Kaiko Kivi"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1082","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visap2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Pictorials","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Pictorials"],"time_stamp":"2024-10-17T14:15:00Z","title":"Loading Ceramics: Visualising Possibilities of Robotics in Ceramics","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1090","abstract":"With armed conflicts and wars continuing to occur globally, the pursuit of peace is an enduring concern. In the efforts to resolve these conflicts, a vast number of peace agreements have been signed. In this project, we examine the extent to which women and gender are explicitly acknowledged or addressed in peace agreements. 
Using debossing, we physicalize the mentions of women and gender in these agreements as a means to increase awareness and recognition of these often-overlooked constituencies.","accessible_pdf":false,"authors":[{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"jennylzx@outlook.com","is_corresponding":true,"name":"Jenny Long"},{"affiliations":["The University of Edinburgh, Edinburgh, United Kingdom"],"email":"jinrui.w@outlook.com","is_corresponding":false,"name":"Jinrui Wang"},{"affiliations":["School of Law (PeaceRep), Edinburgh, United Kingdom"],"email":"tvancisi@ed.ac.uk","is_corresponding":false,"name":"Tomas Vancisin"},{"affiliations":["School of Law (PeaceRep), Edinburgh, United Kingdom"],"email":"laura.wise@ed.ac.uk","is_corresponding":false,"name":"Laura Wise"},{"affiliations":["Newcastle University, Newcastle Upon Tyne, United Kingdom"],"email":"xinhuan.shu@gmail.com","is_corresponding":false,"name":"Xinhuan Shu"},{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"tcapel@ed.ac.uk","is_corresponding":false,"name":"Tara Capel"},{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"uhinrich@ed.ac.uk","is_corresponding":false,"name":"Uta Hinrichs"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1090","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visap2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Pictorials","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Pictorials"],"time_stamp":"2024-10-17T14:25:00Z","title":"Pieces of Peace: Women and Gender in Peace Agreements","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1099","abstract":"This pictorial illustrates an autoethnographic exploration of the first author\u2019s design practice for the data physicalization \u201cShredded Lives: A Decade of Migrant Loss.\u201d It emphasizes the parallel development of seven design components -- Interaction Mode, Technology, Data Representation, Physical Configuration & Scale, Dataset, Engagement Mode, and Spatial Experience. This flexible, non-hierarchical approach allows each of the seven design components to inform and evolve alongside the others, stemming from a desire to thoroughly explore the design space without confinement by initial restrictions. 
As these design components overlap and intersect, dynamic interactions occur, leading to the manifestation of design ideas.","accessible_pdf":false,"authors":[{"affiliations":["Simon fraser university, Burnaby, Canada"],"email":"foroozan_daneshzand@sfu.ca","is_corresponding":true,"name":"Foroozan Daneshzand"},{"affiliations":["University of Victoria, Victoria, Canada"],"email":"cperin@uvic.ca","is_corresponding":false,"name":"Charles Perin"},{"affiliations":["Simon Fraser University, Burnaby, Canada"],"email":"sheelagh@sfu.ca","is_corresponding":false,"name":"Sheelagh Carpendale"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1099","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visap2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Pictorials","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Pictorials"],"time_stamp":"2024-10-17T14:35:00Z","title":"Design Process of 'Shredded Lives': An Illustrated Exploration","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1077","abstract":"Metro systems are the pulsing veins of cities, traversing the city\u2019s texture and preserving the memory of urban life. Visualizing the metro, which is a visceral and accustomed part of the daily lived experience for residents, makes it reappear in residents' perspectives in a new form, becoming a more emblematic landscape of each city's unique identity and development. In this project, we introduce an abstraction method that encodes metro routes as lines, cities as squares, and the global map as an abstract representation. Along with the implementation of an interactive system, the project enables a comprehensive visual exploration of the global metro lines. Through this highly abstract and minimalist form, each city\u2019s structure, symbolic identity, and regional development are revealed. Moreover, the colorful global metro map efficiently portrays the diversity and evolution of metro lines worldwide. 
With this pictorial we narrate the design process and our reflections along the project.","accessible_pdf":false,"authors":[{"affiliations":["Peking University, Beijing, China","Peking University, Beijing, China"],"email":"cxyapril@stu.pku.edu.cn","is_corresponding":true,"name":"Xinyue Chen"},{"affiliations":["Central Academy of Fine Arts, Beijing, China","Central Academy of Fine Arts, Beijing, China"],"email":"846218997@qq.com","is_corresponding":false,"name":"Yixuan Zhang"},{"affiliations":["Shanghai Jiao Tong University, Shanghai, China","Shanghai Jiao Tong University, Shanghai, China"],"email":"flora20@sjtu.edu.cn","is_corresponding":false,"name":"Yutong Yang"},{"affiliations":["NUA School of Design, Nanjing, China","NUA School of Design, Nanjing, China"],"email":"503578112@qq.com","is_corresponding":false,"name":"Jing Chen"},{"affiliations":["Syracuse University, Syrcause, United States","Syracuse University, Syrcause, United States"],"email":"rxu@syr.edu","is_corresponding":false,"name":"Rebecca Ruige Xu"},{"affiliations":["Central Academy of Fine Arts, Beijing, China","Central Academy of Fine Arts, Beijing, China"],"email":"marco@cafa.edu.cn","is_corresponding":false,"name":"Wai Ping Chan"},{"affiliations":["Peking University, Beijing, China","Peking University, Beijing, China"],"email":"xiaoru.yuan@pku.edu.cn","is_corresponding":false,"name":"Xiaoru Yuan"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1077","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visap2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Pictorials","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Pictorials"],"time_stamp":"2024-10-17T14:50:00Z","title":"City Pulse: Revealing City Identity Through Abstraction of Metro Lines","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1035","abstract":"\u201cNorthness\u201d is an installation that maps the latitudes of the servers that host the most popular websites in Brazil. Composed of three-dimensional typographic sculptures, a touch screen and projection, the work allows the public to visualize and locate the servers of the one hundred most accessed websites in Brazil. This installation is part of research in artistic data visualization that addresses issues of the data infrastructure sustaining our society, highlighting the Global North\u2019s dominance in data flows. 
\u201cNorthness\u201d was featured in the exhibition \u201cNumerical Existence: Emergencies,\u201d which took place in 2024 at the Futuros Cultural Center in Brazil.","accessible_pdf":false,"authors":[{"affiliations":["Pontifical Catholic University of Rio de Janeiro, Rio de Janeiro, Brazil","Federal University of Rio de Janeiro, Rio de Janeiro, Brazil"],"email":"luiztorresludwig@gmail.com","is_corresponding":true,"name":"Luiz Ludwig"},{"affiliations":["Universidade Federal do Rio de Janeiro, Rio de Janeiro, Brazil"],"email":"doriskos@eba.ufrj.br","is_corresponding":false,"name":"Doris Kosminsky"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1035","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visap2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Pictorials","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Pictorials"],"time_stamp":"2024-10-17T15:00:00Z","title":"Northness: Poetic Visualization of Data Infrastructure Inequality","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1047","abstract":"In the face of pressing global issues like climate change, data visualization is a powerful tool for making sense of complexity. With the project \u201cA Perfect Storm\u201d, we aim to engage audiences in the oft-difficult conversation around global climate change in a way that considers the emotional responses that the topic can trigger. Through a metaphorical approach of visually juxtaposing countries' climate risk with their climate responsibility, we encourage critical reflection on the human experience and inequities of climate change related loss.","accessible_pdf":false,"authors":[{"affiliations":["Northeastern University, Boston, United States"],"email":"hudsonprock.c@northeastern.edu","is_corresponding":true,"name":"Chloe Hudson Prock"},{"affiliations":["Northeastern University, Boston, United States"],"email":"p.cruz@northeastern.edu","is_corresponding":false,"name":"Pedro M. 
Cruz"},{"affiliations":["Northeastern University, Boston, United States"],"email":"gold.g@northeastern.edu","is_corresponding":false,"name":"Gregory Gold"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1047","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visap2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Pictorials","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Pictorials"],"time_stamp":"2024-10-17T15:10:00Z","title":"A Perfect Storm","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1004","abstract":"EchoVision is an immersive art installation that allows participants to experience the world of bats using sound visualization and mixed reality technology. With a custom-designed, bat-shaped mixed reality mask based on the open-source HoloKit mixed reality project, users can simulate echolocation, the natural navigation system bats use in the dark. They do this by using their voices and interpreting the returned echoes with the mixed-reality visualization. The exhibit adjusts visual feedback based on the pitch and tone of the user's voice, offering a dynamic and interactive depiction of how bats perceive their environment. This installation combines scientific learning with empathetic engagement, encouraging an ecocentric design perspective and understanding between species. 
\"EchoVision\" educates and inspires a deeper appreciation for the unique ways non-human creatures interact with their ecosystems.","accessible_pdf":false,"authors":[{"affiliations":["Reality Design Lab, New York City, United States"],"email":"botao@reality.design","is_corresponding":true,"name":"Botao Amber Hu"},{"affiliations":["University of Texas at Austin, Austin, United States"],"email":"stephlijiabao@gmail.com","is_corresponding":false,"name":"Jiabao Li"},{"affiliations":["China Academy of Art, HangZhou, China"],"email":"danlinhuang0428@gmail.com","is_corresponding":false,"name":"Danlin Huang"},{"affiliations":["China Academy of Art, HangZhou, China"],"email":"liujianan705@outlook.com","is_corresponding":false,"name":"Jianan Johanna Liu"},{"affiliations":["Independent, Shanghai, China"],"email":"agalloch21@gmail.com","is_corresponding":false,"name":"Xiaobo Aaron Hu"},{"affiliations":["Reality Design Lab, New York City, United States"],"email":"elan@reality.design","is_corresponding":false,"name":"Yilan Elan Tao"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1004","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T19:15:00Z","title":"EchoVision","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1014","abstract":"Flags of Inequality is a data exhibit based on the digital project of the same name. This artwork is a collection of forty-nine incomplete pride flags that invite the audience to reflect on the inequalities still faced by the LGBTQ+ population of European countries. This data visualization takes the pride flag, an iconic symbol of the community, and reworks it with data on the laws and policies in these countries to tell the story of inequality through a visual metaphor. In the visualization, the partial pride flags are presented in frames, juxtaposing color with a dark area that signifies the missing portion of the flag. Flags vary dramatically between countries. The flags for Malta or Iceland are almost complete, while the ones of Russia or Azerbaijan are barely visible. The incomplete flags portray the limitations to the lives of the queer community through the absence of color and space. On the other end, a colorful, almost complete flag is a reflection of a place where a whole, joyful queer life is more likely. 
This collection prompts the audience to face the emotional response caused by the meaning of the familiar yet altered symbol, promoting awareness of diverse queer realities and the need for social justice.","accessible_pdf":false,"authors":[{"affiliations":["Independent, Lisbon, Portugal"],"email":"costa.rita93@gmail.com","is_corresponding":true,"name":"Rita Costa"},{"affiliations":["Independent, Lisbon, Portugal"],"email":"mbeatrizmalveiro@gmail.com","is_corresponding":false,"name":"Beatriz Malveiro"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1014","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T19:15:00Z","title":"Flags of Inequality","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1028","abstract":"Collaborative art and co-creation enhance social well-being and connectivity. However, the combination of art creation through mutual brainwave interaction with the prosocial potential of EEG biosignals reveals an untapped opportunity. SynCocreate presents the design and prototype of a VR-based interpersonal electroencephalography (EEG) neurofeedback co-creation platform. This generative VR platform enables paired individuals to interact via brainwaves in a 3D virtual canvas, painted and animated collaboratively through their real-time brainwave data. The platform employs synchronized visual cues, aligned with the real-time brainwaves of paired users, to investigate the potential of collaborative neurofeedback in enhancing co-creativity and emotional connection. 
It also explores the use of Virtual Reality (VR) in fostering creativity and togetherness through immersive, collective visualizations of brainwaves.","accessible_pdf":false,"authors":[{"affiliations":["Independent Researcher, San Mateo, United States"],"email":"fionafeng97@outlook.com","is_corresponding":true,"name":"Xin Feng"},{"affiliations":["VLab, Cambridge, United States","Independent Designer, Cambridge, United States"],"email":"isabel.tg.wang@gmail.com","is_corresponding":false,"name":"Tiange Wang"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1028","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T19:15:00Z","title":"SynCocreate: Fostering Interpersonal Connectedness via Brainwave-Driven Co-creation in Virtual Reality","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1039","abstract":"Transferscope is an interactive installation that lets users explore and reflect the implications of generative artificial intelligence on our perception of the physical world. The handheld device allows users to sample materials, concepts and aesthetics and seamlessly project and apply them onto any object or scene, thereby creating imaginative and unique visual experiences. Transferscope is an open-source powered generative AI exploration device that showcases the expansive potential of AI technologies in artistic creation and design innovation. 
It empowers users to explore multifaceted aesthetics, pushing the boundaries of visual expression and conceptual ideation.","accessible_pdf":false,"authors":[{"affiliations":["University of Design Schw\u00e4bisch Gm\u00fcnd, Schw\u00e4bisch Gm\u00fcnd, Germany"],"email":"cpietsch@gmail.com","is_corresponding":true,"name":"Christopher Pietsch"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1039","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T19:15:00Z","title":"Transferscope - Synthesized Reality","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1041","abstract":"Displacement Flowers: Visualizing global human displacement due to natural disasters. One of the pressing consequences of carbon-fueled climate change is its direct link to causing various forms of natural disasters. These disasters range from wildfires and floods to tsunamis and earthquakes. In the fallout of these disasters, many people become displaced from their homes. By the year 2050 it is estimated that 140 million people will be displaced from their home countries of sub-Saharan Africa, South Asia, and Latin America due to these disasters (World Bank). As a result, it is of increasing importance to address the impacts of climate change and not only the effects on the environment, but also on the world\u2019s inhabitants. 
This visualization was created in order to showcase the impact of natural disasters and the need for climate reform globally in an aesthetically beautiful and interpretable way.","accessible_pdf":false,"authors":[{"affiliations":["Northeastern University, Boston, United States"],"email":"elizabeth.mccaffrey4@gmail.com","is_corresponding":true,"name":"Elizabeth Iris McCaffrey"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1041","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T19:15:00Z","title":"Displacement Flowers","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1068","abstract":"'Rage Against the Archive' is an experimental browser-based video that critically probes how the New York Public Library's website catalogs, displays and even sells dehumanizing ethnographic photos from the 19th-century colonial-era publication The People of India. This work interrogates how images get decontextualized due to the archival process, and documents the \u201chacking\u201d methodology used to insert different texts on the website using HTML in a symbolic act of Electronic Civil Disobedience. The People of India, published between 1868-75, is one of the world's most comprehensive ethnographic books, commissioned by the British colonial government in India after the 1857 First War of Independence. After having experienced violent uprisings and the first challenge to their colonial rule, the British were keen to understand the native tribes and their cultures to rule them better and prevent future rebellions. The camera, masquerading as an objective device, was employed as an imperial tool by the colonial government to document natives, \u201cothering\u201d them in this process. How do these problematic historical images exist in our contemporary Networked Image Culture? 
This video scrutinizes whether institutional archives inadvertently perpetuate colonial exploitation and the camera's violence, raising ethical questions about how we as a more conscientious society should consume certain images online.","accessible_pdf":false,"authors":[{"affiliations":["Syracuse University, Syracuse, United States"],"email":"aroy07@syr.edu","is_corresponding":true,"name":"Anshul Roy"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1068","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T19:15:00Z","title":"Rage Against the Archive","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1089","abstract":"Self-tracking data, often embodied in photos, is a pervasive yet underrecognized form of data that captures our experiences and emotions. \"Mosaic Memory Drive\" explores the materiality of digital images, questioning whether the essence of analog photography, described by Roland Barthes as its \"punctum\", persists in the digital age. By reconstructing images through an endless loop of pixel permutations, this work blurs the line between the original and its reinterpretations, challenging the notion of a post-photographic world. The piece functions as both a puzzle of self-tracked memories and a process of encryption and decryption, emphasizing the plasticity and ephemeral nature of digital media. 
Through this, it invites reflection on our evolving relationship with memory, presence, and the passage of time in the context of digital data.","accessible_pdf":false,"authors":[{"affiliations":["Institute of Visual Computing and Human-Centered Technology, Vienna, Austria"],"email":"ignbpm@gmail.com","is_corresponding":true,"name":"Ignacio P\u00e9rez-Messina"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1089","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T19:15:00Z","title":"Mosaic Memory Drive","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1054","abstract":"Curbside is a personal exploration of (dis)ability and (im)mobility in wintertime Calgary. I use textiles, texts, and photographs to weave together self and the environment. Curbside connects quantitative data about snow and temperature with traces of environmental conditions using dyed wool yarns and photographs. Interlaced throughout are theoretically grounded autobiographical reflections about disability. These reflections focus on how landscape forms and interacts with disability in ways that are informed by water, snow, and ice. It embodies how different forms of data such as quantitative weather data, material traces, and personal stories can work together. 
Curbside is an example of data art that incorporates personal experience to illuminate local systems in thoughtful ways.","accessible_pdf":false,"authors":[{"affiliations":["University of Calgary, Calgary, Canada"],"email":"karly.ross@ucalgary.ca","is_corresponding":true,"name":"Karly Ross"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1054","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T20:15:00Z","title":"Curbside","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1058","abstract":"Although artists and scientists often work together for a visual rendering of scientific concepts, rarely do the two come together in such a close-knit, equal collaboration, in which the germination of the idea and the weaving together of art and science result in an oeuvre in which the scientist explains the science to the artist and the artist gives the artistic view of the science itself, allowing the public to enter the art to see the science. The data remains the same, with two different media providing different interpretive perspectives. In this project, five specific events in the history of the Greenland ice sheet are \u201cinterviewed\u201d, showing how the art and science are interlinked. \u201cInterviews\u201d is a multimodal art installation that seeks to provide viewers with an embodied understanding of glacial change. Through a range of scientific and artistic methodologies we identify distinct phases of knowledge-building about Greenland\u2019s ice as opportunities where texture, form, and diverse data can provide openings for encountering an otherwise overwhelming or threatening reality. Through \u201cInterviews,\u201d viewers are invited to see in Greenland\u2019s past possibilities for a different future. \u201cInterviews\u201d depicts technical advances that have enabled progress in our understanding of Greenland\u2019s Ice Sheet evolution over the millennia. 
The five columns illustrate updates in methods of studying the ice, and are a testament to the ways that diverse data provide complementary insights to the same question, while at the same time illuminating new questions.","accessible_pdf":false,"authors":[{"affiliations":["University of Texas at Austin, Austin, United States"],"email":"fsamsel@tacc.utexas.edu","is_corresponding":true,"name":"Francesca Samsel"},{"affiliations":["University of Texas at Austin, Austin, United States"],"email":"benjamin.keisling@austin.utexas.edu","is_corresponding":false,"name":"Benjamin Keisling"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1058","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T20:15:00Z","title":"Interviews with the Ice","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1078","abstract":"In the video series \u2018Biological Rhythms\u2019, electrical signals generated by plants are sonified and captured to drive real-time data visualisations. From this live data, we will create a series of eight video pieces (see links to draft versions of the first four in \u2018Recent work, video links\u2019 section below). Living plants and the human body may appear to be very different entities, but they have many underlying confluences. One such confluence is that both generate bio-electrical signals that pass through bodily systems. In \u2018Biological Rhythms\u2019 we will use these signals to generate real time visualisations, revealing the unseen bioelectrical rhythms of plants. Through the biological sciences, we understand plant meta-processes such as osmosis and photosynthesis, yet because their cellular structure is so delicate, plants are notoriously hard to study in fine detail. Sonifying plant signals affords a method to explore their bio-rhythms in an accessible form for a non-scientific audience. As part of our bespoke and innovative method, the electrical signals from plants are converted to audio and passed through the program Touch Designer, where the plant signals activate complex geometrical forms. 
Simon Howden composes 'human' music which is mixed live with the plant signals, allowing us to explore co-creation with living plants as a posthuman mode of artistic research.","accessible_pdf":false,"authors":[{"affiliations":["Queensland University of Technology, Brisbane, Australia","UnCalculated Studio, Brisbane, Australia"],"email":"rewa.wright@qut.edu.au","is_corresponding":true,"name":"Rewa Wright"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1078","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T20:15:00Z","title":"BioRhythms: Artistic research with plants, real-time animation and sound","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1079","abstract":"This artwork was born of witnessing my grandmother's memory regression due to dementia, where her cherished stories dissolved into fragmented words. Dr. Mary Steedly once described memories as a \"densely layered, sometimes conflictual negotiation with the passage of time\", and in 2022, over 50 million people faced this painful reality of memory loss due to Alzheimer's and related dementias. Yet, amidst this poignant backdrop, the emergence of text-to-image AI systems in 2022 offered a glimmer of new perspective, as they harnessed the power of language to imagine and reassemble fragmented memories, possibly to weave what time and disease had stolen. \u200b When we coexist with machines, will we accumulate synthetic recollections of collective symbiotic imagination? Is language capable of re-weaving and synthesizing memories? How does our collective memory inspire new visual forms and alternative narratives? Recollection is an assemblage of intimate human-machine artifacts that emphasizes the contributions from three sides: artists, machines, and participants. This customized AI application facilitates multiple AI techniques, like speech recognition, text auto-completion, and text-to-image, to convert language input into image sequences of new memories. As an interactive experience, participants will whisper their personal memories with fragmented sentences, and our system will automatically fill in details, creating new touching visual memories. We developed our customized AI system by fine-tuning a pre-trained transformer-based AI model to learn the documentaries of Alzheimer patients\u2019 visual memories and their descriptions. The system imagines new memories of \"love\" and \"loss\" by interpreting real-time narratives from participants in the installation. Our system emerges as a vibrant and inclusive conversation starter, transcending boundaries with support for over 89 different languages, embracing the diverse cultural artifacts. 
In the art installation, we chose not to showcase the direct visual output generated by our AI system. Instead, we drew inspiration from fine-art practices such as the Monotype, a printmaking technique tracing its origins to the 1640s, and slitscan photography, known for capturing sequential slices of a subject over time. We aimed to present ReCollection by combining generative methodologies with fine-art practices, investigating new aesthetics that explore the fleeting visual imagery, undergoing dissolution, tilting, printing, and reprinting over time. By providing a conceptual framework for non-linear narratives, which constitute symbiotic imaginations, and future scenarios of memories, culture production, and reproductions. It may inspire the cure for memory regression by providing a future scenario, a thought experiment, and an intimate recollection of symbiosis between beings and apparatus. It raises people's awareness of future memory preservation and their empathy for the dementia community through a personalized aesthetic experience. It offers an artistic approach and future prototype for cultural heritage reproduction and re-imagination and explores the tensions that exist in the co-relations between visual representations, language, and narratives.","accessible_pdf":false,"authors":[{"affiliations":["Arizona State University, Tempe, United States"],"email":"weidizhang@ucsb.edu","is_corresponding":true,"name":"weidi zhang"},{"affiliations":["Independant Researcher, Beijing, China"],"email":"jieliang@ucsb.edu","is_corresponding":false,"name":"Jieliang Luo"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1079","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T20:15:00Z","title":"ReCollection","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1094","abstract":"Our work builds on the study of notational systems in the context of rap music and offers rich insights into the complexities of language, culture, and expression in a postcolonial culture. We developed our algorithm by analyzing the classic hip-hop song \u201c93 till Infinity\u201d by Souls of Mischief. Isolating each individual instrument is typical for MIDI files, but data is not available in this format for songs recorded before the new millennium, which were laid on 2\u201d cellulose tapes. Thus, we recreated the song through sampling from the original mp4 format, which only supplies one track of data. As we only needed enough data to map to a visually legible design, the quality of this data was not \u2018audio quality\u2019, however, we would not have been able to computationally visualize a song of this vintage without it. 
With Rap Tapestry, we provide a new mode of expression for understanding the structure and flow of a rap song, mapping each instrument track individually, in combination with colored dots reflecting the rhyming patterns within the rap lyrics. The piece can be experienced in tandem with the audio or in the digital system for a finer grained level of analysis.","accessible_pdf":false,"authors":[{"affiliations":["Northeastern University, Boston, United States"],"email":"c.hull@northeastern.edu","is_corresponding":true,"name":"Carmen Hull"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1094","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T20:15:00Z","title":"Rap Tapestry: A Music Visualization Tool with Physical Weaving Data Physicalization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1097","abstract":"Inspired by Wagashi, the traditional Japanese confection art regarded as a microcosm of time, space and nature, DataWagashi is a new medium aiming to make data tangible, accessible and fun by blending taste, smell, touch, texture, and physical interaction into the vocabulary of data communication. 
By embracing a sensory upgrade from data visualization to data physicalization, Data Wagashi turns data into an experience that is sharable among people and accessible to those with different sensory capabilities, making complex environmental data approachable, fostering empathy, and empowering people to make better choices.","accessible_pdf":false,"authors":[{"affiliations":["VLab, Cambridge, United States","Independent Designer, Cambridge, United States"],"email":"isabel.tg.wang@gmail.com","is_corresponding":true,"name":"Tiange Wang"},{"affiliations":["VLab, Cambridge, United States","Independent Designer, Cambridge, United States"],"email":"ihuang@gsd.harvard.edu","is_corresponding":false,"name":"I-Yang Huang"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1097","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T20:15:00Z","title":"DataWagashi: Feeling Climate Data via New Design Medium","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1103","abstract":"With armed conflicts and wars continuing to occur globally, the pursuit of peace is an enduring concern. In the efforts to resolve these conflicts, a vast number of peace agreements have been signed. In this project, we examine the extent to which women and gender are explicitly acknowledged or addressed in peace agreements. 
Using debossing, we physicalize the mentions of women and gender in these agreements as a means to increase awareness and recognition of these often-overlooked constituencies.","accessible_pdf":false,"authors":[{"affiliations":["The University of Edinburgh, Edinburgh, United Kingdom"],"email":"jinrui.w@outlook.com","is_corresponding":false,"name":"Jinrui Wang"},{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"jennylzx@outlook.com","is_corresponding":true,"name":"Jenny Long"},{"affiliations":["School of Law (PeaceRep), Edinburgh, United Kingdom"],"email":"tvancisi@ed.ac.uk","is_corresponding":false,"name":"Tomas Vancisin"},{"affiliations":["School of Law (PeaceRep), Edinburgh, United Kingdom"],"email":"laura.wise@ed.ac.uk","is_corresponding":false,"name":"Laura Wise"},{"affiliations":["Newcastle University, Newcastle Upon Tyne, United Kingdom"],"email":"xinhuan.shu@gmail.com","is_corresponding":false,"name":"Xinhuan Shu"},{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"tcapel@ed.ac.uk","is_corresponding":false,"name":"Tara Capel"},{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"uhinrich@ed.ac.uk","is_corresponding":false,"name":"Uta Hinrichs"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1103","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T20:15:00Z","title":"Pieces of Peace: Women and Gender in Peace Agreements","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1004","abstract":"Large Language Models (LLMs) have been widely applied in summarization due to their speedy and high-quality text generation. Summarization for sensemaking involves information compression and insight extraction. Human guidance in sensemaking tasks can prioritize and cluster relevant information for LLMs. However, users must translate their cognitive thinking into natural language to communicate with LLMs. Can we use more readable and operable visual representations to guide the summarization process for sensemaking? Therefore, we propose introducing an intermediate step--a schematic visual workspace for human sensemaking--before the LLM generation to steer and refine the summarization process. We conduct a series of proof-of-concept experiments to investigate the potential for enhancing the summarization by GPT-4 through visual workspaces. Leveraging a textual sensemaking dataset with a ground truth summary, we evaluate the impact of a human-generated visual workspace on LLM-generated summarization of the dataset and assess the effectiveness of space-steered summarization. 
We categorize several types of extractable information from typical human workspaces that can be injected into engineered prompts to steer the LLM summarization. The results demonstrate how such workspaces can help align an LLM with the ground truth, leading to more accurate summarization results than without the workspaces.","accessible_pdf":false,"authors":[{"affiliations":["Computer Science Department, Blacksburg, United States"],"email":"tangxxwhu@gmail.com","is_corresponding":true,"name":"Xuxin Tang"},{"affiliations":["Dod, Laurel, United States"],"email":"ericpkrokos@gmail.com","is_corresponding":false,"name":"Eric Krokos"},{"affiliations":["Department of Defense, College Park, United States"],"email":"visual.tycho@gmail.com","is_corresponding":false,"name":"Kirsten Whitley"},{"affiliations":["City University of Hong Kong, Hong Kong, China"],"email":"canliu@cityu.edu.hk","is_corresponding":false,"name":"Can Liu"},{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"naren@cs.vt.edu","is_corresponding":false,"name":"Naren Ramakrishnan"},{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"north@vt.edu","is_corresponding":false,"name":"Chris North"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1004","image_caption":"We created an intermediate workspace based on the ground truth of an intelligence analysis dataset to better understand the enhancements in LLM summarization achieved by integrating the workspace. We then conducted proof-of-concept experiments to assess how the workspace and each type of information impact LLM summarization. The experiment pipeline and simulated workspace are shown in the image.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Steering LLM Summarization with Visual Workspaces for Sensemaking","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1007","abstract":"We explore the use of segmentation and summarization methods for the generation of real-time conversation topic timelines, in the context of glanceable Augmented Reality (AR) visualization. Conversation timelines may serve to summarize and contextualize conversations as they are happening, helping to keep conversations on track. Because dialogue and conversations are broad and unpredictable by nature, and our processing is being done in real-time, not all relevant information may be present in the text at the time it is processed. 
Thus, we present considerations and challenges which may not be as prevalent in traditional implementations of topic classification and dialogue segmentation. Furthermore, we discuss how AR visualization requirements and design practices require an additional layer of decision making, which must be factored directly into the text processing algorithms. We explore three segmentation strategies -- using dialogue segmentation based on the text of the entire conversation, segmenting on 1-minute intervals, and segmenting on 10-second intervals -- and discuss our results.","accessible_pdf":false,"authors":[{"affiliations":["University of Calgary, Calgary, Canada"],"email":"shanna.hollingwor1@ucalgary.ca","is_corresponding":true,"name":"Shanna Li Ching Hollingworth"},{"affiliations":["University of Calgary, Calgary, Canada"],"email":"wj@wjwillett.net","is_corresponding":false,"name":"Wesley Willett"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1007","image_caption":"A screenshot of an early system prototype of a real-time conversation timeline visualized in augmented reality, broken into 10-second chunks of conversation.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Towards Real-Time Speech Segmentation for Glanceable Conversation Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1008","abstract":"Academic literature reviews have traditionally relied on techniques such as keyword searches and accumulation of relevant back-references, using databases like Google Scholar or IEEEXplore. However, both the precision and accuracy of these search techniques is limited by the presence or absence of specific keywords, making literature review akin to searching for needles in a haystack. We present vitaLITy 2, a solution that uses a Large Language Model or LLM-based approach to identify semantically relevant literature in a textual embedding space. We include a corpus of 66,692 papers from 1970-2023 which are searchable through text embeddings created by three language models. vitaLITy 2 contributes a novel Retrieval Augmented Generation (RAG) architecture and can be interacted with through an LLM with augmented prompts, including summarization of a collection of papers. vitaLITy 2 also provides a chat interface that allow users to perform complex queries without learning any new programming language. This also enables users to take advantage of the knowledge captured in the LLM from its enormous training corpus. 
Finally, we demonstrate the applicability of vitaLITy 2 through two usage scenarios.","accessible_pdf":false,"authors":[{"affiliations":["University of Nottingham, Nottingham, United Kingdom"],"email":"psxah15@nottingham.ac.uk","is_corresponding":true,"name":"Hongye An"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"arpitnarechania@gatech.edu","is_corresponding":false,"name":"Arpit Narechania"},{"affiliations":["University of Nottingham, Nottingham, United Kingdom"],"email":"kai.xu@nottingham.ac.uk","is_corresponding":false,"name":"Kai Xu"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1008","image_caption":"The figure shows a diagram of the system architecture of VITALITY 2. VITALITY 2 is an innovative platform aimed at streamlining academic literature search and review. It uses Large Language Models to identify relevant papers, providing a chat interface for natural language queries.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2408.13450","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1008/w-nlviz-1008_Preview.mp4?token=y2MR5-H0oG3Jtsan-bnhnSpndim7GH_XnD9XZ1hb_40&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1008/w-nlviz-1008_Preview.srt?token=AjUOxTsJYV8q8pajUb3Qnf3cG940axo4M5xIqK0jMLA&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"hXf2ythEUrk","session_youtube_ff_link":"https://youtu.be/hXf2ythEUrk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"vitaLITy 2: Reviewing Academic Literature Using Large Language Models","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1009","abstract":"Analyzing and finding anomalies in multi-dimensional datasets is a cumbersome but vital task across different domains. In the context of financial fraud detection, analysts must quickly identify suspicious activity among transactional data. This is an iterative process made of complex exploratory tasks such as recognizing patterns, grouping, and comparing. To mitigate the information overload inherent to these steps, we present a tool combining automated information highlights, Large Language Model generated textual insights, and visual analytics, facilitating exploration at different levels of detail. We perform a segmentation of the data per analysis area and visually represent each one, making use of automated visual cues to signal which require more attention. Upon user selection of an area, our system provides textual and graphical summaries. 
The text, acting as a link between the high-level and detailed views of the chosen segment, allows for a quick understanding of relevant details. A thorough exploration of the data comprising the selection can be done through graphical representations. The feedback gathered in a study performed with seven domain experts suggests our tool effectively supports and guides exploratory analysis, easing the identification of suspicious information.","accessible_pdf":false,"authors":[{"affiliations":["Feedzai, Lisbon, Portugal"],"email":"beatriz.feliciano@feedzai.com","is_corresponding":true,"name":"Beatriz Feliciano"},{"affiliations":["Feedzai, Lisbon, Portugal"],"email":"rita.costa@feedzai.com","is_corresponding":false,"name":"Rita Costa"},{"affiliations":["Feedzai, Porto, Portugal"],"email":"jean.alves@feedzai.com","is_corresponding":false,"name":"Jean Alves"},{"affiliations":["Feedzai, Madrid, Spain"],"email":"javier.liebana@feedzai.com","is_corresponding":false,"name":"Javier Li\u00e9bana"},{"affiliations":["Feedzai, Lisbon, Portugal"],"email":"diogo.duarte@feedzai.com","is_corresponding":false,"name":"Diogo Ramalho Duarte"},{"affiliations":["Feedzai, Lisbon, Portugal"],"email":"pedro.bizarro@feedzai.com","is_corresponding":false,"name":"Pedro Bizarro"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1009","image_caption":"The interface guides the analysis of financial multi-dimensional datasets through multiple levels of detail exploration. It is composed of (A) a region where the alert is segmented in the subgroups that compose it (A.1, A.2, A.3, A.4, A.5, and A.6) and where groups that require more attention (in this case, A.5) are highlighted in red; (B) an automatically generated text summary of a selected area (A.3) that provides a broad understanding of the group; and (C) an interactive graphical representation of all the data points of the selected area to explore information in detail.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1009/w-nlviz-1009_Preview.mp4?token=g34tqFW7hEOO-D0vRVGUskUrSvO9BTa5jnRCboZ1bF4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1009/w-nlviz-1009_Preview.srt?token=vytwDrOsrTkd45XIC6aDkP-CnIfx3qT1SYSK5I1lwx0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"ywuG-oB69rs","session_youtube_ff_link":"https://youtu.be/ywuG-oB69rs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"\u201cShow Me What\u2019s Wrong!\u201d: Combining Charts and Text to Guide Data 
Analysis","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1010","abstract":"Dimension reduction (DR) can transform high-dimensional text embeddings into a 2D visual projection facilitating the exploration of document similarities. However, the projection often lacks connection to the text semantics, due to the opaque nature of text embeddings and non-linear dimension reductions. To address these problems, we propose a gradient-based method for visualizing the spatial semantics of dimensionally reduced text embeddings. This method employs gradients to assess the sensitivity of the projected documents with respect to the underlying words. The method can be applied to existing DR algorithms and text embedding models. Using these gradients, we designed a visualization system that incorporates spatial word clouds into the document projection space to illustrate the impactful text features. We further present three usage scenarios that demonstrate the practical applications of our system to facilitate the discovery and interpretation of underlying semantics in text projections.","accessible_pdf":false,"authors":[{"affiliations":["Computer Science, Virginia Tech, Blacksburg, United States"],"email":"wliu3@vt.edu","is_corresponding":true,"name":"Wei Liu"},{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"north@vt.edu","is_corresponding":false,"name":"Chris North"},{"affiliations":["Tulane University, New Orleans, United States"],"email":"rfaust1@tulane.edu","is_corresponding":false,"name":"Rebecca Faust"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1010","image_caption":"Document projection of COVID-19 open research articles with gradient-based word explanations. (Top) A projection from a BERT model fine-tuned based on the data domain, featuring a spatial word cloud that captures the spatial semantics by showing key words that impact the projection. (Bottom) A heatmap of word impacts in a selected document, highlighting the word \"smoking\", which reflects the domain context. 
","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2409.03949","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1010/w-nlviz-1010_Preview.mp4?token=bSHHu08UdZr9ZXWQZxXbwqotHqmKVJqnMcQx3z8Nou8&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"P-20dcQY1wI","session_youtube_ff_link":"https://youtu.be/P-20dcQY1wI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Visualizing Spatial Semantics of Dimensionally Reduced Text Embeddings","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1011","abstract":"Recently, large language models (LLMs) have shown great promise in translating natural language (NL) queries into visualizations, but their \u201cblack-box\u201d nature often limits explainability and debuggability. In response, we present a comprehensive text prompt that, given a tabular dataset and an NL query about the dataset, generates an analytic specification including (detected) data attributes, (inferred) analytic tasks, and (recommended) visualizations. This specification captures key aspects of the query translation process, affording both explainability and debuggability. For instance, it provides mappings from the detected entities to the corresponding phrases in the input query, as well as the specific visual design principles that determined the visualization recommendations. Moreover, unlike prior LLM-based approaches, our prompt supports conversational interaction and ambiguity detection capabilities. 
In this paper, we detail the iterative process of curating our prompt, present a preliminary performance evaluation using GPT-4, and discuss the strengths and limitations of LLMs at various stages of query translation.","accessible_pdf":true,"authors":[{"affiliations":["UNC Charlotte, Charlotte, United States"],"email":"ssah1@uncc.edu","is_corresponding":false,"name":"Subham Sah"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"rmitra34@gatech.edu","is_corresponding":true,"name":"Rishab Mitra"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"arpitnarechania@gatech.edu","is_corresponding":false,"name":"Arpit Narechania"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"endert@gatech.edu","is_corresponding":false,"name":"Alex Endert"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"john.stasko@cc.gatech.edu","is_corresponding":false,"name":"John Stasko"},{"affiliations":["UNC Charlotte, Charlotte, United States"],"email":"wdou1@uncc.edu","is_corresponding":false,"name":"Wenwen Dou"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1011","image_caption":"Figure showing NL4DV-LLM pipeline for Generating Analytic Specifications for Data Visualization from Natural Language Queries using Large Language Models.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2408.13391","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1011/w-nlviz-1011_Preview.mp4?token=NhjPytOjKrz329cylPDzMT_PCDr8y8g0oEUWNDbzGmk&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1011/w-nlviz-1011_Preview.srt?token=9fl8NRyQX_o9vqmKY6hjIx3GswiLz8EEAJbJskYGJpA&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"jF33mGxryrM","session_youtube_ff_link":"https://youtu.be/jF33mGxryrM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Generating Analytic Specifications for Data Visualization from Natural Language Queries using Large Language Models","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1016","abstract":"We explore how natural language authoring with large language models (LLMs) can support the inline authoring of word-scale visualizations (WSVs).While word-scale visualizations that live alongside and within document text can support rich integration of data into written narratives and communication, these small visualizations have typically been challenging to author. 
We explore how modern LLMs---which are able to generate diverse visualization designs based on simple natural language descriptions---might allow authors to specify and insert new visualizations inline as they write text. Drawing on our experiences with an initial prototype built using GPT-4, we highlight the expressive potential of inline natural language visualization authoring and identify opportunities for further research.","accessible_pdf":true,"authors":[{"affiliations":["University of Calgary, Calgary, Canada"],"email":"paige.sobrien@ucalgary.ca","is_corresponding":true,"name":"Paige So'Brien"},{"affiliations":["University of Calgary, Calgary, Canada"],"email":"wj@wjwillett.net","is_corresponding":false,"name":"Wesley Willett"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1016","image_caption":"This image is a screenshot of an editor application where authors can create and embed word-scale visualizations for text using LLM capabilities. The screenshot of the application includes a text area where authors can add their content. Below the text area there is a search bar for authors to submit plain language instructions for creating a visualization. In the text area, the numbers 1 2 3 4 are highlighted and used to generate a bar chart of the four values displayed inline with the text. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1016/w-nlviz-1016_Preview.mp4?token=uc0UehyL9uXCt3ew8c1Uusat150b8TfAqMhbW79z_hs&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1016/w-nlviz-1016_Preview.srt?token=oUhIwPjdOVxBNz_MOd7pjd2MO9LYuu5jykJxvxYK_Vo&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"xNb6NcY2Rpo","session_youtube_ff_link":"https://youtu.be/xNb6NcY2Rpo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Towards Inline Natural Language Authoring for Word-Scale Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1019","abstract":"As language models have become increasingly successful at a wide array of tasks, different prompt engineering methods have been developed alongside them in order to adapt these models to new tasks. One of them is Tree-of-Thoughts (ToT), a prompting strategy and framework for language model inference and problem-solving. It allows the model to explore multiple solution paths and select the best course of action, producing a tree-like structure of intermediate steps (i.e., thoughts). This method was shown to be effective for several problem types. 
However, the official implementation has a high barrier to usage as it requires setup overhead and incorporates task-specific problem templates which are difficult to generalize to new problem types. It also does not allow user interaction to improve or suggest new thoughts. We introduce iToT (interactive Tree-of- Thoughts), a generalized and interactive Tree of Thought prompting system. iToT allows users to explore each step of the model\u2019s problem-solving process as well as to correct and extend the model\u2019s thoughts. iToT revolves around a visual interface that facilitates simple and generic ToT usage and transparentizes the problem-solving process to users. This facilitates a better understanding of which thoughts and considerations lead to the model\u2019s final decision. Through two case studies, we demonstrate the usefulness of iToT in different human-LLM co-writing tasks.","accessible_pdf":true,"authors":[{"affiliations":["ETHZ, Zurich, Switzerland"],"email":"aboyle@student.ethz.ch","is_corresponding":false,"name":"Alan David Boyle"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"igupta@ethz.ch","is_corresponding":false,"name":"Isha Gupta"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"shoenig@student.ethz.ch","is_corresponding":false,"name":"Sebastian H\u00f6nig"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"lukas.mautner98@gmail.com","is_corresponding":false,"name":"Lukas Mautner"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"kenza.amara@ai.ethz.ch","is_corresponding":false,"name":"Kenza Amara"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"furui.cheng@inf.ethz.ch","is_corresponding":false,"name":"Furui Cheng"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"melassady@ai.ethz.ch","is_corresponding":false,"name":"Mennatallah El-Assady"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1019","image_caption":"We introduce iToT (interactive Tree-of-Thoughts), a generalized and interactive Tree of Thought prompting system. The iToT workflow: During initialization, the user provides an input prompt describing the task, examples of successful sequences of thoughts, and an evaluation prompt with self-evaluation criteria. They also specify the model parameters and visualization settings (1). During the generation process, the parametrized model produces a set of ranked candidate thoughts. The user can expand on these model-generated thoughts or add a new custom thought (2). 
Finally, iToT offers evaluation: thoughts are ranked by the model's self-evaluation and assessed based on their semantic similarity and self-consistency (3).","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2409.00413","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1019/w-nlviz-1019_Preview.mp4?token=nS_Jlckf5IkzLwNdlu5KO-1wQYvURkYMYbBURIvPK_Q&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1019/w-nlviz-1019_Preview.srt?token=x4UYP36Yra1jccXARMK6kkUITOgXtpQqWN-jCYx74fQ&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"hj2FVIZWiSk","session_youtube_ff_link":"https://youtu.be/hj2FVIZWiSk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"iToT: An Interactive System for Customized Tree-of-Thought Generation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1020","abstract":"Strategy management analyses are created by business consultants with common analysis frameworks (i.e. comparative analyses) and associated diagrams. We show these can be largely constructed using LLMs, starting with the extraction of insights from data, organization of those insights according to a strategy management framework, and then depiction in the typical strategy management diagram for that framework (static textual visualizations). We discuss caveats and future directions to generalize for broader uses.","accessible_pdf":false,"authors":[{"affiliations":["Uncharted Software, Toronto, Canada"],"email":"richard.brath@alumni.utoronto.ca","is_corresponding":true,"name":"Richard Brath"},{"affiliations":["Uncharted Software, Toronto, Canada"],"email":"miltonjbradley@gmail.com","is_corresponding":false,"name":"Adam James Bradley"},{"affiliations":["Uncharted Software, Toronto, Canada"],"email":"david@jonker.work","is_corresponding":false,"name":"David Jonker"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1020","image_caption":"From insight generation to diagram by LLM: 1. The LLM generates insights from data. 2. The LLM organizes insights by a strategy management analysis framework, e.g. Porter\u2019s Five Forces of Value Discipline. 3. 
The LLM generates the corresponding strategy management diagram.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1020/w-nlviz-1020_Preview.mp4?token=C8aMLxtsocipeEwrixc0-JxHGGPSavrm0QgbrcxLsgA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1020/w-nlviz-1020_Preview.srt?token=2ZhBfAnDs3zJFFuL8WFB8HZ6wsLp0zz9las4-yAVirQ&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"aefl1VsQPDc","session_youtube_ff_link":"https://youtu.be/aefl1VsQPDc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Strategic management analysis: from data to strategy diagram by LLM","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1021","abstract":"We present a mixed-methods study to explore how large language models (LLMs) can assist users in the visual exploration and analysis of complex data structures, using knowledge graphs (KGs) as a baseline. We surveyed and interviewed 20 professionals who regularly work with LLMs with the goal of using them for (or alongside) KGs. From the analysis of our interviews, we contribute a preliminary roadmap for the design of LLM-driven visual analysis systems and outline future opportunities in this emergent design space.","accessible_pdf":false,"authors":[{"affiliations":["MIT Lincoln Laboratory, Lexington, United States"],"email":"harry.li@ll.mit.edu","is_corresponding":false,"name":"Harry Li"},{"affiliations":["Tufts University, Medford, United States"],"email":"gabriel.appleby@tufts.edu","is_corresponding":false,"name":"Gabriel Appleby"},{"affiliations":["MIT Lincoln Laboratory, Lexington, United States"],"email":"ashley.suh@ll.mit.edu","is_corresponding":true,"name":"Ashley Suh"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1021","image_caption":"We present a mixed-methods study to explore how large language models (LLMs) can assist users in the visual exploration and analysis of complex data structures, using knowledge graphs (KGs) as a baseline. We surveyed and interviewed 20 professionals who regularly work with LLMs with the goal of using them for (or alongside) KGs. 
From the analysis of our interviews, we contribute a preliminary roadmap for the design of LLM-driven visual analysis systems and outline future opportunities in this emergent design space.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"A Preliminary Roadmap for LLMs as Visual Data Analysis Assistants","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1022","abstract":"This study explores the potential of visual representation in understanding the structural elements of Arabic poetry, a subject of significant educational and research interest. Our objective is to make Arabic poetic works more accessible to readers of both Arabic and non-Arabic linguistic backgrounds by employing visualization, exploration, and analytical techniques. We transformed poetry texts into syllables, identified their metrical structures, segmented verses into patterns, and then converted these patterns into visual representations. Following this, we computed and visualized the dissimilarities between these images, and overlaid their differences. Our findings suggest that the positional patterns across a poem play a pivotal role in effective poetry clustering, as demonstrated by our newly computed metrics. The results of our clustering experiments showed a marked improvement over previous attempts, thereby providing new insights into the composition and structure of Arabic poetry. This study underscored the value of visual representation in enhancing our understanding of Arabic poetry.","accessible_pdf":true,"authors":[{"affiliations":["University of Neuch\u00e2tel, Neuch\u00e2tel, Switzerland"],"email":"abdelmalek.berkani@unine.ch","is_corresponding":true,"name":"Abdelmalek Berkani"},{"affiliations":["University of Neuch\u00e2tel, Neuch\u00e2tel, Switzerland"],"email":"adrian.holzer@unine.ch","is_corresponding":false,"name":"Adrian Holzer"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1022","image_caption":"This image illustrates the overlay of structural and color differences between the first 10 lines of two poems, converted into images after detecting the meter and patterns. 
The analysis of these differences led to the calculation of comparison and classification metrics.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Enhancing Arabic Poetic Structure Analysis through Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-1762","abstract":"Weather can have a significant impact on the power grid. Heat and cold waves lead to increased energy use as customers cool or heat their space, while simultaneously hampering energy production as the environment deviates from ideal operating conditions. Extreme heat has previously melted power cables, while extreme cold can cause vital parts of the energy infrastructure to freeze. Utilities have reserves to compensate for the additional energy use, but in extreme cases which fall outside the forecast energy demand, the impact on the power grid can be severe. In this paper, we present an interactive tool to explore the relationship between weather and power outages. We demonstrate its use with the example of the impact of Winter Storm Uri on Texas in February 2021.","accessible_pdf":false,"authors":[{"affiliations":["Institute of Computer Science, Leipzig University, Leipzig, Germany"],"email":"nsonga@informatik.uni-leipzig.de","is_corresponding":true,"name":"Baldwin Nsonga"},{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"andy.berres@gmail.com","is_corresponding":false,"name":"Andy S Berres"},{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"bobby.jeffers@nrel.gov","is_corresponding":false,"name":"Robert Jeffers"},{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"caitlyn.clark6@icloud.com","is_corresponding":false,"name":"Caitlyn Clark"},{"affiliations":["University of Kaiserslautern, Kaiserslautern, Germany"],"email":"hagen@cs.uni-kl.de","is_corresponding":false,"name":"Hans Hagen"},{"affiliations":["Leipzig University, Leipzig, Germany"],"email":"scheuermann@informatik.uni-leipzig.de","is_corresponding":false,"name":"Gerik Scheuermann"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-energyvis-1762","image_caption":"Weather can have a significant impact on the power grid. In this paper, we propose an interactive tool to explore the relationship between weather and power outages. We demonstrate its use with the example of the impact of winter storm Uri on Texas in February 2021. 
While the number of affected customers by county, median temperatures, and unavailable power are shown in juxtaposed timelines for easy temporal comparison, the map view shows the spatial distribution of temperature and outages. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-energyvis/w-energyvis-1762/w-energyvis-1762_Preview.mp4?token=QQ_HHel2WPYLN9wZuayBdQrrivNDAdvyec2pZEtlD3M&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-energyvis/w-energyvis-1762/w-energyvis-1762_Preview.srt?token=QbRAW3fYWlzBPpqEXdBPXQeXb2bg44S4nB1578sd1Og&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"i6cHT3DHCm4","session_youtube_ff_link":"https://youtu.be/i6cHT3DHCm4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Extreme Weather and the Power Grid: A Case Study of Winter Storm Uri","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-2646","abstract":"With the growing penetration of inverter-based distributed energy resources and increased loads through electrification, power systems analyses are becoming more important and more complex. Moreover, these analyses increasingly involve the combination of interconnected energy domains with data that are spatially and temporally increasing in scale by orders of magnitude, surpassing the capabilities of many existing analysis and decision-support systems. We present the architectural design, development, and application of a high-resolution web-based visualization environment capable of cross-domain analysis of tens of millions of energy assets, focusing on scalability and performance. Our system supports the exploration, navigation, and analysis of large data from diverse domains such as electrical transmission and distribution systems, mobility and electric vehicle charging networks, communications networks, cyber assets, and other supporting infrastructure. 
We evaluate this system across multiple use cases, describing the capabilities and limitations of a web-based approach for high-resolution energy system visualizations.","accessible_pdf":false,"authors":[{"affiliations":["National Renewable Energy Lab, Golden, United States"],"email":"graham.johnson@nrel.gov","is_corresponding":true,"name":"Graham Johnson"},{"affiliations":["National Renewable Energy Lab, Golden, United States"],"email":"sam.molnar@nrel.gov","is_corresponding":false,"name":"Sam Molnar"},{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"nicholas.brunhart-lupo@nrel.gov","is_corresponding":false,"name":"Nicholas Brunhart-Lupo"},{"affiliations":["National Renewable Energy Lab, Golden, United States"],"email":"kenny.gruchalla@nrel.gov","is_corresponding":false,"name":"Kenny Gruchalla"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-energyvis-2646","image_caption":"Image Description: Snapshot of the 100-megapixel high-resolution display with an interactive visualization in the browser. Two synthetic energy model topologies are shown: an electrical transmission system (blue lines) and a corresponding distribution system (orange points) in the San Francisco Bay area. These two models have over 12 million combined features. We discuss the capabilities of different rendering approaches such as vector tiling, aggregation techniques, and efficient binary formats. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Architecture for Web-Based Visualization of Large-Scale Energy Domains","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-2743","abstract":"In the pursuit of achieving net-zero greenhouse gas emissions by 2050, policymakers and researchers require sophisticated tools to explore and compare various climate transition scenarios. This paper introduces the Pathways Explorer, an innovative visualization tool designed to facilitate these comparisons by providing an interactive platform that allows users to select, view, and dissect multiple pathways towards sustainability. Developed in collaboration with the \u00ab\u00a0Institut de l\u2019\u00e9nergie Trottier\u00a0\u00bb (IET), this tool leverages a technoeconomic optimization model to project the energy transformation needed under different constraints and assumptions. We detail the design process that guided the development of the Pathways Explorer, focusing on user-centered design challenges and requirements. 
A case study is presented to demonstrate how the tool has been utilized by stakeholders to make informed decisions, highlighting its impact and effectiveness. The Pathways Explorer not only enhances understanding of complex climate data but also supports strategic planning by providing clear, comparative visualizations of potential future scenarios.","accessible_pdf":false,"authors":[{"affiliations":["Kashika Studio, Montreal, Canada"],"email":"francois.levesque@polymtl.ca","is_corresponding":false,"name":"Fran\u00e7ois L\u00e9vesque"},{"affiliations":["Polytechnique Montreal, Montreal, Canada"],"email":"louis.beaumier@polymtl.ca","is_corresponding":false,"name":"Louis Beaumier"},{"affiliations":["Polytechnique Montreal, Montreal, Canada"],"email":"thomas.hurtut@polymtl.ca","is_corresponding":true,"name":"Thomas Hurtut"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-energyvis-2743","image_caption":"Pathways Explorer allows policymakers and researchers to explore and compare various climate transition scenarios.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Pathways Explorer: Interactive Visualization of Climate Transition Scenarios","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-2845","abstract":"Methane (CH4) leakage monitoring is crucial for environmental protection and regulatory compliance, particularly in the oil and gas industries. Reducing CH4 emissions helps advance green energy by converting it into a valuable energy source through innovative capture technologies. A real-time continuous monitoring system (CMS) is necessary to detect fugitive and intermittent emissions and provide actionable insights. Integrating spatiotemporal data from satellites, airborne sensors, and ground sensors with inventory data and the weather research and forecasting (WRF) model creates a comprehensive dataset, making CMS feasible but posing significant challenges. These challenges include data alignment and fusion, managing heterogeneity, handling missing values, ensuring resolution integrity, and maintaining geometric and radiometric accuracy. This study outlines the procedure for methane leakage detection, addressing challenges at each step and offering solutions through machine learning and data analysis. 
It further details how visual analytics can be implemented to improve the effectiveness of the various aspects of emission monitoring.","accessible_pdf":false,"authors":[{"affiliations":["University of Oklahoma, Norman, United States"],"email":"parisa.masnadi@ou.edu","is_corresponding":true,"name":"Parisa Masnadi Khiabani"},{"affiliations":["University of Oklahoma, Norman, United States"],"email":"danala@ou.edu","is_corresponding":false,"name":"Gopichandh Danala"},{"affiliations":["University of Oklahoma, Norman, United States"],"email":"wolfgang.jentner@uni-konstanz.de","is_corresponding":false,"name":"Wolfgang Jentner"},{"affiliations":["University of Oklahoma, Oklahoma, United States"],"email":"ebert@ou.edu","is_corresponding":false,"name":"David Ebert"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-energyvis-2845","image_caption":"The image shows how integrating top-down and bottom-up approaches for methane leakage detection addresses methodological gaps, enhancing the detection and understanding of emission sources and rates. This integration enables cross-validation, which improves both top-down and bottom-up modeling. Every step contributes to visualization, yet data analysis and visual analytics are not only crucial for providing precise feedback for modeling but also integral in enhancing each step of the process. These tools are key for tackling challenges in data integration, effectively managing information, and uncovering hidden patterns, ensuring continuous improvement across all stages.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Challenges in Data Integration, Monitoring, and Exploration of Methane Emissions: The Role of Data Analysis and Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-3496","abstract":"Transmission System Operators (TSO) often need to integrate multiple sources of information to make decisions in real time. In cases where a single power line goes offline, due to a natural event or scheduled outage, there typically will be a contingency plan that the TSO may utilize to mitigate the situation. In cases where two or more power lines go offline, this contingency plan is no longer valid, and they must re-prepare and reason about the network in real time. A key network property that must be balanced is loadability--the range of permissible voltage levels for a specific bus (or node), understood as a function of power and its active (P) and reactive (Q) components. Loadability provides information on how much more demand a specific node can handle before the system becomes unstable. 

To increase loadability, the TSO can potentially make control actions that raise or lower P or Q, which results in changing the voltage levels required to be within permissible limits. While many methods exist to calculate loadability and represent loadability to end users, there has been little focus on tailoring loadability visualizations to the unique needs of TSOs. In this paper we involve operations domain experts in a human centered design process to prototype two new loadability visualizations for TSOs. We contribute a design paper that yields: (1) a working model of the operator's decision making process, (2) example artifacts of the two data visualization techniques, and (3) a critical qualitative expert review of our designs.","accessible_pdf":false,"authors":[{"affiliations":["Hitachi Energy Research, Montreal, Canada"],"email":"dmarino@cim.mcgill.ca","is_corresponding":true,"name":"David Marino"},{"affiliations":["Carleton University, Ottawa, Canada"],"email":"maxwellkeleher@cmail.carleton.ca","is_corresponding":false,"name":"Maxwell Keleher"},{"affiliations":["Hitachi Energy Research, Krakow, Poland"],"email":"krzysztof.chmielowiec@hitachienergy.com","is_corresponding":false,"name":"Krzysztof Chmielowiec"},{"affiliations":["Hitachi Energy Research, Montreal, Canada"],"email":"antony.hilliard@hitachienergy.com","is_corresponding":false,"name":"Antony Hilliard"},{"affiliations":["Hitachi Energy Research, Krakow, Poland"],"email":"pawel.dawidowski@hitachienergy.com","is_corresponding":false,"name":"Pawel Dawidowski"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-energyvis-3496","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Operator-Centered Design of a Nodal Loadability Network Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-4135","abstract":"This paper presents a dashboard to find and compare days with similar weather patterns within an 80-year historical weather dataset. The dashboard facilitates the analysis of weather patterns and their impact on renewable energy generation by defining and identifying similar weather days. Users are given the flexibility to select the metric for determining similarity, which includes a combination of temperature, dew point, wind speed, Global Horizontal Irradiance (GHI), Direct Horizontal Irradiance (DHI), and cloud cover. The region for this work is limited to Texas. 

The dashboard then generates an output that compares the selected weather metrics and the corresponding renewable generation outputs.","accessible_pdf":false,"authors":[{"affiliations":["Texas A&M University, College Station, United States"],"email":"sanjanakunkolienkar@tamu.edu","is_corresponding":true,"name":"Sanjana Kunkolienkar"},{"affiliations":["Texas A&M University, College Station, United States"],"email":"nislavch@tamu.edu","is_corresponding":false,"name":"Nikola Slavchev"},{"affiliations":["Texas A&M University, College Station, United States"],"email":"fsafdarian@tamu.edu","is_corresponding":false,"name":"Farnaz Safdarian"},{"affiliations":["Texas A&M University, College Station, United States"],"email":"overbye@tamu.edu","is_corresponding":false,"name":"Thomas Overbye"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-energyvis-4135","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Developing a Dashboard To Enhance Visualization of Similar Historical Weather Patterns and Renewable Energy Generation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-4332","abstract":"The rapid growth of the solar energy industry requires advanced educational tools to train the next generation of engineers and technicians. We present a novel system for situated visualization of photovoltaic (PV) module performance, leveraging a combination of PV simulation, sun-sky position, and head-mounted augmented reality (AR). Our system is guided by four principles of development: simplicity, adaptability, collaboration, and maintainability, realized in six components. 
Users interactively manipulate a physical module's orientation and shading referents with immediate feedback on the module's performance.","accessible_pdf":true,"authors":[{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"nicholas.brunhart-lupo@nrel.gov","is_corresponding":false,"name":"Nicholas Brunhart-Lupo"},{"affiliations":["National Renewable Energy Lab, Golden, United States"],"email":"kenny.gruchalla@nrel.gov","is_corresponding":true,"name":"Kenny Gruchalla"},{"affiliations":["Fort Lewis College, Durango, United States"],"email":"williams_l@fortlewis.edu","is_corresponding":false,"name":"Laurie Williams"},{"affiliations":["Fort Lewis College, Durango, United States"],"email":"selias@fortlewis.edu","is_corresponding":false,"name":"Steve Ellis"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-energyvis-4332","image_caption":"A simulated image showing a photovoltaic module's performance for workforce training. An augmented reality projection overlays simulation results onto a physical panel, depicting power flow with arrow and pipe glyphs. Sunlit cells are highlighted in yellow. Shadowed cells are bypassed by diodes and marked with spheres. The optical tracking marker in the foreground relays the panel\u2019s orientation to the system. Users can tilt or rotate the physical panel, adjust the virtual sun\u2019s position using time and geo-coordinate controls, and add virtual occluding objects to explore panel behavior under various conditions.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Situated Visualization of Photovoltaic Module Performance for Workforce Development","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-5170","abstract":"Scenario studies are a technique for representing a range of possible complex decisions through time, and analyzing the impact of those decisions on future outcomes of interest. It is common to use scenarios as a way to study potential pathways towards future build out and decarbonization of energy systems. The results of these studies are often used by diverse energy system stakeholders \u2014 such as community organizations, power system utilities, and policymakers \u2014 for decision-making using data visualization. However, the role of visualization in facilitating decision-making with energy scenario data is not well understood. 
In this work, we review common visualization designs employed in energy scenario studies and discuss the effectiveness of some of these techniques in facilitating different types of analysis with scenario data.","accessible_pdf":true,"authors":[{"affiliations":["National Renewable Energy Lab, Golden, United States"],"email":"sam.molnar@nrel.gov","is_corresponding":true,"name":"Sam Molnar"},{"affiliations":["National Renewable Energy Lab, Golden, United States"],"email":"kenny.gruchalla@nrel.gov","is_corresponding":false,"name":"Kenny Gruchalla"},{"affiliations":["National Renewable Energy Lab, Golden, United States"],"email":"graham.johnson@nrel.gov","is_corresponding":false,"name":"Graham Johnson"},{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"kristi.potter@nrel.gov","is_corresponding":false,"name":"Kristi Potter"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-energyvis-5170","image_caption":"Two visualizations of renewable site location and capacities for four different energy scenarios. a) Each site has a radar plot where the distance from the center indicates the capacity for the labeled scenario, as shown in the legend. Wind and solar sites are plotted as separate colors (blue and yellow, respectively). b) An aggregated visualization of scenario data where each site is colored according to the number of scenarios it occurs in and the resource type.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Opportunities and Challenges in the Visualization of Energy Scenarios for Decision-Making","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-6102","abstract":"This paper introduces CPIE (Coal Pollution Impact Explorer), a spatiotemporal visual analytic tool developed for interactive visualization of coal pollution impacts. CPIE visualizes electricity-generating units (EGUs) and their contributions to statewide Medicare deaths related to coal PM2.5 emissions. The tool is designed to make scientific findings on the impacts of coal pollution more accessible to the general public and to raise awareness of the associated health risks. 
We present three use cases for CPIE: 1) the overall spatial distribution of all 480 facilities in the United States, their statewide impact on excess deaths, and the overall decreasing trend in deaths associated with coal pollution from 1999 to 2020; 2) the influence of pollution transport, where most deaths associated with the facilities located within the same state and neighboring states but some deaths occur far away; and 3) the effectiveness of intervention regulations, such as installing emissions control devices and shutting down coal facilities, in significantly reducing the number of deaths associated with coal pollution.","accessible_pdf":false,"authors":[{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"sjin86@gatech.edu","is_corresponding":true,"name":"Sichen Jin"},{"affiliations":["George Mason University, Fairfax, United States"],"email":"lhennem@gmu.edu","is_corresponding":false,"name":"Lucas Henneman"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"jessica.roberts@cc.gatech.edu","is_corresponding":false,"name":"Jessica Roberts"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-energyvis-6102","image_caption":"The user interface of CPIE shows the coal pollution impacts when Pennsylvania is selected. It consists of (A) a choropleth map view highlighting facilities in Pennsylvania and showing statewide deaths associated with all facilities in Pennsylvania, (B) a choropleth map displaying the number of deaths in Pennsylvania attributable to facilities in other states, and (C) a stacked line chart showing the changes in deaths associated with all Pennsylvania facilities from 1999 to 2020. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-energyvis/w-energyvis-6102/w-energyvis-6102_Preview.mp4?token=EINVooRDQh8xi0eCB3e_DTaID96kxyaD7YNFSvTZO1E&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-energyvis/w-energyvis-6102/w-energyvis-6102_Preview.srt?token=6t7FqJGJP_zsFQvtp7VhnXedhhpPkGgOMvEluTNJ9fI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"bhNcOjTG8IQ","session_youtube_ff_link":"https://youtu.be/bhNcOjTG8IQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"CPIE: A Spatiotemporal Visual Analytic Tool to Explore the Impact of Coal Pollution","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-9750","abstract":"This paper presents a novel open system, ChatGrid, for easy, intuitive, and interactive geospatial visualization of large-scale transmission networks. 
ChatGrid uses state-of-the-art techniques for geospatial visualization of large networks, including 2.5D map views, animated flows, hierarchical and level-based filtering and aggregation to provide visual information in an easy, cognitive manner. The highlight of ChatGrid is a natural language query based interface powered by a large language model (ChatGPT) that offers a natural and flexible interactive experience whereby users can ask questions and ChatGrid provides responses both in text and visually. This paper discusses the architecture, implementation, design decisions, and usage of large language models for ChatGrid.","accessible_pdf":false,"authors":[{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"sjin86@gatech.edu","is_corresponding":true,"name":"Sichen Jin"},{"affiliations":["Pacific Northwest National Laboratory, Richland, United States"],"email":"shrirang.abhyankar@pnnl.gov","is_corresponding":false,"name":"Shrirang Abhyankar"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-energyvis-9750","image_caption":"ChatGrid interface displaying the visualization and query interface. Queries asked by users are responded through both text and visualization. The vertical bars represent the generation sources that have a remaining capacity greater than 100 MW.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-energyvis/w-energyvis-9750/w-energyvis-9750_Preview.mp4?token=G43Qaxtn7P8w0WvFMF6Rbu2gvhAUgB_s5zRe3DjAHp0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-energyvis/w-energyvis-9750/w-energyvis-9750_Preview.srt?token=O-gp28W-cRZ9vCDRgQWuEnavIHuHWp2rtLuQc8tryn8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"v_T0stnFeb8","session_youtube_ff_link":"https://youtu.be/v_T0stnFeb8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"ChatGrid: Power Grid Visualization Empowered by a Large Language Model","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-9875","abstract":"Large-scale power outages, such as those caused by extreme weather events, have a big impact on human behavior. A short power outage is merely a nuisance for most, and may not change people's locations. An outage that lasts for a few hours can result in spoiled food and medical supplies, and people will have to restock spoiled items. Long outages result in temperatures outside tolerable levels in homes, and may prompt people to acquire supplies, such as generators and gas, or change location. The long outages during Winter Storm Uri in Texas resulted in millions of dollars in property damage due to freezing pipes. This level of damage is expected to result in a sharp increase in supply runs and contractor activity. 
In this paper, we present a tool to explore differences in visiting patterns before, during, and after power outages. It allows to compare different points of interest like medical facilities, grocery stores, hardware stores, and other types of businesses.","accessible_pdf":false,"authors":[{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"andy.berres@gmail.com","is_corresponding":true,"name":"Andy S Berres"},{"affiliations":["Institute of Computer Science, Leipzig University, Leipzig, Germany"],"email":"nsonga@informatik.uni-leipzig.de","is_corresponding":false,"name":"Baldwin Nsonga"},{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"caitlyn.clark6@icloud.com","is_corresponding":false,"name":"Caitlyn Clark"},{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"bobby.jeffers@nrel.gov","is_corresponding":false,"name":"Robert Jeffers"},{"affiliations":["University of Kaiserslautern, Kaiserslautern, Germany"],"email":"hagen@cs.uni-kl.de","is_corresponding":false,"name":"Hans Hagen"},{"affiliations":["Leipzig University, Leipzig, Germany"],"email":"scheuermann@informatik.uni-leipzig.de","is_corresponding":false,"name":"Gerik Scheuermann"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-energyvis-9875","image_caption":"We present a visual analysis of the impact of the 2021 Texas Power Crisis on building occupancy in Austin, Texas. In February 2021, Winter Storm Uri caused temperatures to rapidly drop up to 50\u2109/25\u2103 below typical Texas winter temperatures (see comparison on the top left), and due to the isolated nature of the Texas powergrid, there was little room for compensation for the additional load and . The top right shows a heatmap comparison of power outages over time (x-axis) for different Texas counties (y-axis). The red line indicates the threshold for the 10% most affected counties (in the tool itself, hovering reveals more information about the counties and the extent of the outages). The tool provides navigation elements for users to select two timeframes they want to compare. In this case, we chose the 3 days with most intense outages, and an equivalent 3-day window two weeks prior, before the winter storm hit. The bottom shows buildings colored by POI type (for buildings with multiple POI, we chose the type with the highest importance \u2013 shown in the legend on the left). The map in the middle shows increases (green) and decreases (purple) in visits during the storm, compared with pre-storm conditions. The changes in visits/occupancy by POI subtype (colored by POI type) are shown on the bottom right. Large Event Spaces (which served as cold shelters) saw an increase in occupancy that\u2019s just a little over the decrease in occupancy of residential homes, and the visits to correctional facilities dropped dramatically.\u00a0 With the exception of the weather layer, all graphics come from MoVis, an interactive prototype we developed. 
To learn more about the weather impact on the power grid, see our other paper \u201cExtreme Weather and the Power Grid: A Case Study of Winter Storm Uri.\u201d ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-energyvis/w-energyvis-9875/w-energyvis-9875_Preview.mp4?token=tsVlw0ZOeOysdNfQ1Q9FMVlt54O3hFivdbokYCDQRSo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-energyvis/w-energyvis-9875/w-energyvis-9875_Preview.srt?token=1isu0PYjNMZEIjmSXAlWSh3Xpp2WWat8MM6KQ2Bx4y8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"al9x4utB7ss","session_youtube_ff_link":"https://youtu.be/al9x4utB7ss","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Evaluating the Impact of Power Outages on Occupancy Patterns During the 2021 Texas Power Crisis","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1008","abstract":"With the increasing amount of data globally, analyzing and visualizing data are becoming essential skills across various professions. It is important to equip university students with these essential data skills. To learn, design, and develop data visualization, students need knowledge of programming and data science topics. Many university programs lack dedicated data science courses for undergraduate students, making it important to introduce these concepts through integrated courses. However, combining data science and data visualization into one course can be challenging due to the time constraints and the heavy load of learning. In this paper, we discuss the development of teaching data science and data visualization together in one course and share the results of the post-course evaluation survey. From the survey's results, we identified four challenges, including difficulty in learning multiple tools and diverse data science topics, varying proficiency levels with tools and libraries, and selecting and cleaning datasets. We also distilled five opportunities for developing a successful data science and visualization course. 
These opportunities include clarifying the course structure, emphasizing visualization literacy early in the course, updating the course content according to student needs, using large real-world datasets, learning from industry professionals, and promoting collaboration among students.","accessible_pdf":true,"authors":[{"affiliations":["Carleton University, Ottawa, Canada"],"email":"shrihariniramesh@cmail.carleton.ca","is_corresponding":true,"name":"Shri Harini Ramesh"},{"affiliations":["Carleton University, Ottawa, Canada","Bruyere Research Institute, Ottawa, Canada"],"email":"fateme.rajabiyazdi@carleton.ca","is_corresponding":false,"name":"Fateme Rajabiyazdi"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1008","image_caption":"Challenges and Opportunities of Teaching Data Visualization Together with Data Science","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2409.05969","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3a","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)","session_uid":"w-eduvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/CGfeWajdPXw&t=0h47m22s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)"],"time_stamp":"2024-10-13T13:10:00Z","title":"Challenges and Opportunities of Teaching Data Visualization Together with Data Science","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1013","abstract":"Academic advising can positively impact struggling students' success. We developed AdVizor, a data-driven learning analytics tool for academic risk prediction for advisors. Our system is equipped with a random forest model for grade prediction probabilities and uses a visualization dashboard to allow advisors to interpret model predictions. We evaluated our system in mock advising sessions with academic advisors and undergraduate students at our university. Results show that the system can easily integrate into the existing advising workflow, and visualizations of model outputs can be learned through short training sessions. AdVizor supports and complements the existing expertise of the advisor while helping to facilitate advisor-student discussion and analysis. Advisors found the system assisted them in guiding student course selection for the upcoming semester. It allowed them to guide students to prioritize the most critical and impactful courses. Both advisors and students perceived the system positively and were interested in using the system in the future. 
Our results encourage the development of intelligent advising systems in higher education, catered for advisors.","accessible_pdf":false,"authors":[{"affiliations":["Ontario Tech University, Oshawa, Canada"],"email":"riley.weagant@ontariotechu.net","is_corresponding":false,"name":"Riley Weagant"},{"affiliations":["Ontario Tech University, Oshawa, Canada"],"email":"zixin.zhao@ontariotechu.net","is_corresponding":true,"name":"Zixin Zhao"},{"affiliations":["Ontario Tech University, Oshawa, Canada"],"email":"abradley@uncharted.software","is_corresponding":false,"name":"Adam Badley"},{"affiliations":["Ontario Tech University, Oshawa, Canada"],"email":"christopher.collins@ontariotechu.ca","is_corresponding":false,"name":"Christopher Collins"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1013","image_caption":"Figure of a student and academic advisor sitting across from each other with a computer screen between them, on top is a zoomed out image of the AdVizor interface.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-eduvis/w-eduvis-1013/w-eduvis-1013_Preview.mp4?token=MEibWd4aMD5VjMhdpUG36Mlq-IT4gyQHec2_Pzkw40I&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-eduvis/w-eduvis-1013/w-eduvis-1013_Preview.srt?token=QZwYkdAHRWd5UxGYaX72aY20X_jhmYmrtp3uAB2V54w&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3a","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)","session_uid":"w-eduvis","session_youtube_ff_id":"0srC2ClVQTY","session_youtube_ff_link":"https://youtu.be/0srC2ClVQTY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/XeytaUH5Z8c&t=0h28m39s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)"],"time_stamp":"2024-10-13T13:10:00Z","title":"AdVizor: Using Visual Explanations to Guide Data-Driven Student Advising","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1020","abstract":"In this paper, we discuss our experiences advancing a professional-oriented graduate program in Cartography & GIScience at the University of Wisconsin-Madison to account for fundamental shifts in conceptual framings, rapidly evolving mapping technologies, and diverse student needs. We focus our attention on considerations for the cartography curriculum given its relevance to (geo)visualization education and map literacy. 
We reflect on challenges associated with, and lessons learned from, developing a comprehensive and cohesive cartography curriculum across in-person and online learning modalities for a wide range of professional student audiences.","accessible_pdf":true,"authors":[{"affiliations":["University of Wisconsin-Madison, Madison, United States"],"email":"jknelson3@wisc.edu","is_corresponding":true,"name":"Jonathan Nelson"},{"affiliations":["University of Wisconsin-Madison, Madison, United States"],"email":"limpisathian@wisc.edu","is_corresponding":false,"name":"P. William Limpisathian"},{"affiliations":["University of Wisconsin-Madison, Madison, United States"],"email":"reroth@wisc.edu","is_corresponding":false,"name":"Robert Roth"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1020","image_caption":"Developing and maintaining a robust cartography curriculum is challenging yet essential for meeting the needs of the professional cartographer. The cartography curriculum at the University of Wisconsin-Madison (2024-25) is organized within a conceptual framework, consisting of an orthogonal pair of axes to capture both the traditional distinction between mapmaking and map use and the more contemporary distinction between cartographic representation and interaction. The curriculum is collaboratively developed, conceptually-grounded, technologically diverse, and integrated with open educational resources to ensure it remains current, relevant, and synchronized across in-person/online learning modalities.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3a","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)","session_uid":"w-eduvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/CGfeWajdPXw&t=0h57m54s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)"],"time_stamp":"2024-10-13T13:10:00Z","title":"Developing a Robust Cartography Curriculum to Train the Professional Cartographer","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1025","abstract":"Systems thinking is fundamental for understanding complex problems. Addressing twenty-first century challenges like climate change requires comprehending how different components of Earth systems influence each other. The carbon cycle, crucial to our planet\u2019s climate system, is a powerful context for helping the rising generation develop systems thinking skills. Traditional 2-D static images often fail to convey the complexities of the carbon cycle, making it challenging for learners. These representations do not communicate dynamic features of the carbon cycle, such as its multiple scales and interconnected processes. 
We hypothesize that interactive visualization can aid learning by enabling dynamic exploration and consideration of human impacts, thereby fostering systems thinking. ","accessible_pdf":false,"authors":[{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"mina.mani@liu.se","is_corresponding":true,"name":"Mina Mani"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"konrad.schonborn@liu.se","is_corresponding":false,"name":"Konrad J Sch\u00f6nborn"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-eduvis-1025","image_caption":"","keywords":[],"open_access_supplemental_link":"https://nightingaledvs.com/tracing-carbon-visualization-for-systems-thinking/","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3a","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)","session_uid":"w-eduvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/CGfeWajdPXw&t=1h10m22s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)"],"time_stamp":"2024-10-13T13:10:00Z","title":"Tracing Carbon: Visualization for Systems Thinking","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1018","abstract":"In this article, we discuss an experience with design and situated learning in the Creative Data Visualization course, part of the Visual Communication Design undergraduate program at the Federal University of Rio de Janeiro, a free, public Brazilian university that, thanks to affirmative action policies, has become more inclusive over the years. We begin with a brief introduction to the terms Situated Knowledge, coined by Donna Haraway, Situated Design, based on the former concept, and Situated Learning. We then examine the similarities and differences between these notions and the term Situated Visualization to present a model for the concept of Situated Learning in Information Visualization. Following this foundation, we describe the applied methodology, emphasizing the importance of integrating real-world contexts into students\u2019 projects. As a case study, we present three student projects produced as final assignments for the course. 
Through this article, we aim to underscore the articulation of situated design concepts in information visualization activities and contribute to teaching and learning practices in this field, particularly within the Global South.","accessible_pdf":false,"authors":[{"affiliations":["Universidade Federal do Rio de Janeiro, Rio de Janeiro, Brazil"],"email":"doriskos@eba.ufrj.br","is_corresponding":false,"name":"Doris Kosminsky"},{"affiliations":["Federal University of Rio de Janeiro, Rio de Janeiro, Brazil"],"email":"renata.perim@ufrj.br","is_corresponding":false,"name":"Renata Perim Lopes"},{"affiliations":["UFRJ, RJ, Brazil","IBGE, RJ, Brazil"],"email":"regina.reznik@ufrj.br","is_corresponding":false,"name":"Regina Reznik"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1018","image_caption":"The image displays a diagram on the left side of the page, featuring four nested circles, each symbolizing a stage in the Situated Learning model for information visualization. The outermost circle is labeled \"situated contexts,\" linked to \"location,\" covering space, time, place, activity, and social aspects. The second circle, \"collecting data,\" is connected to \"embodied skills.\" The third circle, \"mapping & design\", also links to \"embodied skills.\" The innermost circle is \"presentation,\" linked to \"partial view.\" The right side shows the VIS2024 conference logo.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-eduvis/w-eduvis-1018/w-eduvis-1018_Preview.mp4?token=0BoqkCys97DIKZNWWtFaOEk1WrA-Y6I3NAXl2h8L-Vs&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-eduvis/w-eduvis-1018/w-eduvis-1018_Preview.srt?token=OeV-pHQ_GBKtBOLIFN4Aaneggy3pcOXM9z1rOXRlxQ8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3a","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)","session_uid":"w-eduvis","session_youtube_ff_id":"-jQLve3cCL8","session_youtube_ff_link":"https://youtu.be/-jQLve3cCL8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/CGfeWajdPXw&t=1h18m42s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)"],"time_stamp":"2024-10-13T14:15:00Z","title":"Teaching Information Visualization through Situated Design: Case Studies from the Classroom","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1028","abstract":"With the decreasing cost of consumer display technologies making it easier for universities to have larger displays in classrooms, and the ubiquitous use of online tools such as collaborative whiteboards for remote learning during the COVID-19 pandemic, combining the two can be useful in higher education. 
This is especially true in visually intensive classes, such as data visualization courses, that can benefit from additional \"space to teach,\" coined after the \"space to think\" sense-making idiom. In this paper, we reflect on our approach to using SAGE3, a collaborative whiteboard with advanced features, in higher education to teach visually intensive classes, provide examples of activities from our own visually-intensive courses, and present student feedback. We gather our observations into usage patterns for using content-rich canvases in education.","accessible_pdf":false,"authors":[{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"jessemh@vt.edu","is_corresponding":true,"name":"Jesse Harden"},{"affiliations":["University of Hawaii at Manoa, Honolulu, United States"],"email":"nuritk@hawaii.edu","is_corresponding":false,"name":"Nurit Kirshenbaum"},{"affiliations":["University of Hawaii at Manoa, Honolulu, United States"],"email":"tabalbar@hawaii.edu","is_corresponding":false,"name":"Roderick S Tabalba Jr."},{"affiliations":["University of Hawaii at Manoa, Honolulu, United States"],"email":"rtheriot@hawaii.edu","is_corresponding":false,"name":"Ryan Theriot"},{"affiliations":["The University of Hawai'i at M\u0101noa, Honolulu, United States"],"email":"mlr2010@hawaii.edu","is_corresponding":false,"name":"Michael L. Rogers"},{"affiliations":["University of Hawaii at Manoa, Honolulu, United States"],"email":"mahdi@hawaii.edu","is_corresponding":false,"name":"Mahdi Belcaid"},{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"north@vt.edu","is_corresponding":false,"name":"Chris North"},{"affiliations":["University of Illinois at Chicago, Chicago, United States"],"email":"renambot@uic.edu","is_corresponding":false,"name":"Luc Renambot"},{"affiliations":["University of Illinois at Chicago, Chicago, United States"],"email":"llong4@uic.edu","is_corresponding":false,"name":"Lance Long"},{"affiliations":["University of Illinois Chicago, Chicago, United States"],"email":"ajohnson@uic.edu","is_corresponding":false,"name":"Andrew E Johnson"},{"affiliations":["University of Hawaii at Manoa, Honolulu, United States"],"email":"leighj@hawaii.edu","is_corresponding":false,"name":"Jason Leigh"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1028","image_caption":"A professor using an online whiteboard, SAGE3, for an in-person class with a very large display. 
On the online whiteboard are multiple slides of PowerPoint slide decks, saved as PDFs, and various sticky notes from student contributions.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3a","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)","session_uid":"w-eduvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/CGfeWajdPXw&t=1h30m26s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)"],"time_stamp":"2024-10-13T14:15:00Z","title":"Space to Teach: Content-Rich Canvases for Visually-Intensive Education","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1029","abstract":"Data-art blends visualisation, data science, and artistic expression. It allows people to transform information and data into exciting and interesting visual narratives.Hosting a public data-art hands-on workshop enables participants to engage with data and learn fundamental visualisation techniques. However, being a public event, it presents a range of challenges. We outline our approach to organising and conducting a public workshop, that caters to a wide age range, from children to adults. We divide the tutorial into three sections, focusing on data, sketching skills and visualisation. We place emphasis on public engagement, and ensure that participants have fun while learning new skills.","accessible_pdf":true,"authors":[{"affiliations":["Bangor University, Bangor, United Kingdom"],"email":"j.c.roberts@bangor.ac.uk","is_corresponding":true,"name":"Jonathan C Roberts"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1029","image_caption":"Data-art blends visualisation, data science, and artistic expression. We outline our approach to organising and conducting a public workshop, that caters to a wide age range. 
We divide the tutorial into three sections, focusing on data, sketching skills and visualisation.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2408.04750","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-eduvis/w-eduvis-1029/w-eduvis-1029_Preview.mp4?token=IFMi0sw4B2ZjhXBaG5uL5V7PS9ZDz27LtpdpwNjm7SQ&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-eduvis/w-eduvis-1029/w-eduvis-1029_Preview.srt?token=IORjrd025DzhF-yHbmSCWdQlImmmgO71TFEjP3jCyNI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3a","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)","session_uid":"w-eduvis","session_youtube_ff_id":"qO_Uj50TocQ","session_youtube_ff_link":"https://youtu.be/qO_Uj50TocQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/CGfeWajdPXw&t=1h43m12s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)"],"time_stamp":"2024-10-13T14:15:00Z","title":"Engaging Data-Art: Conducting a Public Hands-On Workshop","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1026","abstract":"For over half a century, science centers have been key in communicating science, aiming to increase interest and curiosity in STEM, and promote lifelong learning. Science centers integrate interactive technologies like dome displays, touch tables, VR and AR for immersive learning. Visitors can explore complex phenomena, such as conducting a virtual autopsy. Also, the shift towards digitally interactive exhibits has expanded science centers beyond physical locations to virtual spaces, extending their reach into classrooms. Our investigation revealed several key factors for impactful school visits involving interactive data visualization such as full-dome movies, provide unique perspectives about vast and microscopic phenomena. Hands-on discovery allows pupils to manipulate and investigate data, leading to deeper engagement. Collaborative interaction fosters active learning through group participation. Additionally, clear curriculum connections ensure that visits are pedagogically meaningful. We propose a three-stage model for school visits. The \"Experience\" stage involves immersive visual experiences to spark interest. The \"Engagement\" stage builds on this by providing hands-on interaction with data visualization exhibits. The \"Applicate\" stage offers opportunities to apply and create using data visualization. 
A future goal of the model is to broaden STEM reach, enabling pupils to benefit from data visualization experiences even if they cannot visit centers.","accessible_pdf":false,"authors":[{"affiliations":["Link\u00f6ping university, Norrk\u00f6ping, Sweden"],"email":"andreas.c.goransson@liu.se","is_corresponding":true,"name":"Andreas G\u00f6ransson"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"konrad.schonborn@liu.se","is_corresponding":false,"name":"Konrad J Sch\u00f6nborn"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1026","image_caption":"Example of digital science center environment at Norrk\u00f6ping Visualization Center C, Sweden. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3a","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)","session_uid":"w-eduvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/CGfeWajdPXw&t=1h57m2s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)"],"time_stamp":"2024-10-13T14:15:00Z","title":"What makes school visits to digital science centers successful?","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1030","abstract":"We propose to leverage the recent development in Large Language Models, in combination to data visualization software and devices in science centers and schools in order to foster more personalized learning experiences. The main goal with our endeavour is to provide to pupils and visitors the same experience they would get with a professional facilitator when interacting with data visualizations of complex scientific phenomena. 
We describe the results from our early prototypes and the intended implementation and testing of our idea.","accessible_pdf":false,"authors":[{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"lonni.besancon@gmail.com","is_corresponding":false,"name":"Lonni Besan\u00e7on"},{"affiliations":["LiU Link\u00f6ping Universitet, Norrk\u00f6ping, Sweden"],"email":"mathis.brossier@liu.se","is_corresponding":true,"name":"Mathis Brossier"},{"affiliations":["King Abdullah University of Science and Technology, Thuwal, Saudi Arabia"],"email":"omar.mena@kaust.edu.sa","is_corresponding":false,"name":"Omar Mena"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"erik.sunden@liu.se","is_corresponding":false,"name":"Erik Sund\u00e9n"},{"affiliations":["Link\u00f6ping university, Norrk\u00f6ping, Sweden"],"email":"andreas.c.goransson@liu.se","is_corresponding":false,"name":"Andreas G\u00f6ransson"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"anders.ynnerman@liu.se","is_corresponding":false,"name":"Anders Ynnerman"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"konrad.schonborn@liu.se","is_corresponding":false,"name":"Konrad J Sch\u00f6nborn"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1030","image_caption":"The portable globe that we aim to bring to schools so that students can directly ask questions to it.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3a","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)","session_uid":"w-eduvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/CGfeWajdPXw&t=2h6m34s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)"],"time_stamp":"2024-10-13T16:00:00Z","title":"TellUs \u2013 Leveraging the power of LLMs with visualization to benefit science centers.","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1031","abstract":"In this reflective essay, we explore how educational science can be relevant for visualization research, addressing beneficial intersections between the two communities. While visualization has become integral to various areas, including education, our own ongoing collaboration has induced reflections and discussions we believe could benefit visualization research. In particular, we identify five key perspectives: surpassing traditional evaluation metrics by incorporating established educational measures; defining constructs based on existing learning and educational research frameworks; applying established cognitive theories to understand interpretation and interaction with visualizations; establishing uniform terminology across disciplines; and, fostering interdisciplinary convergence. 
We argue that by integrating educational research constructs, methodologies, and theories, visualization research can further pursue ecological validity and thereby improve the design and evaluation of visual tools. Our essay emphasizes the potential of intensified and systematic collaborations between educational scientists and visualization researchers to advance both fields, and in doing so craft visualization systems that support comprehension, retention, transfer, and critical thinking. We argue that this reflective essay serves as a first point of departure for initiating dialogue that, we hope, could help further connect educational science and visualization, by proposing future empirical studies that take advantage of interdisciplinary approaches of mutual gain to both communities.","accessible_pdf":false,"authors":[{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"konrad.schonborn@liu.se","is_corresponding":false,"name":"Konrad J Sch\u00f6nborn"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"lonni.besancon@gmail.com","is_corresponding":false,"name":"Lonni Besan\u00e7on"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1031","image_caption":"In this reflective essay, we explore how educational science can be relevant for visualization research, addressing beneficial intersections between the two communities.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://osf.io/preprints/osf/8jbmz","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3b","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 2)","session_uid":"w-eduvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/XeytaUH5Z8c&t=0h1m14s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 2)"],"time_stamp":"2024-10-13T16:00:00Z","title":"What Can Educational Science Offer Visualization? A Reflective Essay","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1027","abstract":"Parallel coordinate plots (PCPs) are gaining popularity in data exploration, statistical analysis, and predictive analysis, as well as data-driven storytelling. In this paper, we present the results of a post-hoc analysis of a dataset from a PCP literacy intervention to identify barriers to PCP literacy. We analyzed question responses and inductively identified barriers to PCP literacy. We performed group coding on each individual response and identified new barriers to PCP literacy. Based on our analysis, we present an extended and enhanced list of barriers to PCP literacy. 
Our findings have implications towards educational interventions targeting PCP literacy and can provide an approach for students to learn about PCPs through active learning.","accessible_pdf":false,"authors":[{"affiliations":["University of San Francisco, San Francisco, United States"],"email":"csrinivas2@dons.usfca.edu","is_corresponding":false,"name":"Chandana Srinivas"},{"affiliations":["Cukurova University, Adana, Turkey"],"email":"elifemelfirat@gmail.com","is_corresponding":false,"name":"Elif E. Firat"},{"affiliations":["University of Nottingham, Nottingham, United Kingdom"],"email":"robert.laramee@nottingham.ac.uk","is_corresponding":false,"name":"Robert S. Laramee"},{"affiliations":["University of San Francisco, San Francisco, United States"],"email":"apjoshi@usfca.edu","is_corresponding":true,"name":"Alark Joshi"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1027","image_caption":"This figure shows the methodology used to inductively identify an enhanced list of PCP literacy barriers.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://www.cs.usfca.edu/~apjoshi/papers/2024_EduVis_PCP_Barriers.pdf","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3b","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 2)","session_uid":"w-eduvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/XeytaUH5Z8c&t=0h14m20s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 2)"],"time_stamp":"2024-10-13T16:00:00Z","title":"An Inductive Approach for Identification of Barriers to PCP Literacy","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1010","abstract":"This report examines the implementation of the Solution Framework in a social impact project facilitated by VizForSocialGood. It outlines the data visualization process, detailing each stage and offering practical insights. 
The framework's application demonstrates its effectiveness in enhancing project quality, efficiency, and collaboration, making it a valuable tool for educational and professional environments.","accessible_pdf":false,"authors":[{"affiliations":["Independent Information Designer, Medellin, Colombia","Independent Information Designer, Medellin, Colombia"],"email":"munozdataviz@gmail.com","is_corresponding":false,"name":"Victor Mu\u00f1oz"},{"affiliations":["Corporate Information Designer, Arlington Hts, United States","Corporate Information Designer, Arlington Hts, United States"],"email":"hellokevinford@gmail.com","is_corresponding":false,"name":"Kevin Ford"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1010","image_caption":"This image contains chat logs of the interaction between a mentor and a mentee, implementing the Solution Framework in a social impact project. The conversations reflect collaboration and guidance in refining a data visualization, providing a practical model for practitioners to document their workflows and mentoring strategies.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3b","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 2)","session_uid":"w-eduvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/XeytaUH5Z8c&t=0h42m18s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 2)"],"time_stamp":"2024-10-13T16:00:00Z","title":"Implementing the Solution Framework in a Social Impact Project","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1007","abstract":"Visualizations are a critical medium not only for telling stories, but for fostering exploration. But while there are countless examples of how to use visualizations for \u201cstorytelling with data,\u201d there are few guidelines on how to design visualizations for public exploration. This educator report draws on decades of work in science museums, a public context focused on designing interactive experiences for exploration, to provide evidence-based guidelines for designing exploratory visualizations. Recent studies on interactive visualizations in museums are contextualized within a larger body of museum research on designs that support exploratory learning in interactive exhibits. Synthesizing these studies highlights that to create successful exploratory visualizations, designers can apply long-standing guidelines from exhibit design but need to provide more aids for interpretation.","accessible_pdf":false,"authors":[{"affiliations":["Science Communication Lab, Berkeley, United States","University of California, San Francisco, San Francisco, United States"],"email":"jafrazier@gmail.com","is_corresponding":true,"name":"Jennifer 
Frazier"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1007","image_caption":"Museums visitors using an interactive visualization at the Exploratorium (image credit: Amy Snyder).","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3b","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 2)","session_uid":"w-eduvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/XeytaUH5Z8c&t=0h48m37s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 2)"],"time_stamp":"2024-10-13T16:00:00Z","title":"Beyond storytelling with data: Guidelines for designing exploratory visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-pdav-1006","abstract":"The healthcare system collects extensive data, encompassing patient administrative information, clinical measurements, and home-monitored health metrics. To support informed decision-making in patient care and treatment management, it is essential to review and analyze these diverse data sources. Data visualization is a promising solution to navigate healthcare datasets, uncover hidden patterns, and derive actionable insights. However, the process of creating interactive data visualization can be rather challenging due to the size and complexity of these datasets. Progressive data science offers a potential solution, enabling interaction with intermediate results during data exploration. In this paper, we reflect on our experiences with three health data visualization projects employing a progressive data science approach. We explore the practical implications and challenges faced at various stages, including data selection, pre-processing, data mining, transformation, and interpretation and evaluation.We highlighted unique challenges and opportunities for three projects, including visualizing surgical outcomes, tracking patient bed transfers, and integrating patient-generated data visualizations into the healthcare setting.We identified the following challenges: inconsistent data collection practices, the complexity of adapting to varying data completeness levels, and the need to modify designs for real-world deployment. 
Our findings underscore the need for careful consideration of using a progressive data science approach when designing visualizations for healthcare settings.","accessible_pdf":false,"authors":[{"affiliations":["Carleton University, Ottawa, Canada"],"email":"faisalzaki@cmail.carleton.ca","is_corresponding":false,"name":"Faisal Zaki Roshan"},{"affiliations":["Carleton University, Ottawa, Canada"],"email":"abhishekahuja@cmail.carleton.ca","is_corresponding":false,"name":"Abhishek Ahuja"},{"affiliations":["Carleton University, Ottawa, Canada"],"email":"fateme.rajabiyazdi@carleton.ca","is_corresponding":true,"name":"Fateme Rajabiyazdi"}],"award":"","doi":"","event_id":"w-pdav","event_title":"Progressive Data Analysis and Visualization (PDAV) Workshop.","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-pdav-1006","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop4","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Progressive Data Analysis and Visualization (PDAV) Workshop","session_uid":"w-pdav","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Progressive Data Analysis and Visualization (PDAV) Workshop"],"time_stamp":"2024-10-14T12:30:00Z","title":"Practical Challenges of Progressive Data Science in Healthcare","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-pdav-1009","abstract":"In a world where data has become too large for direct human perception, scientists have developed methods for specific data exploration. Until recently, two main methodologies were used for their exploration: scientific visualization (SciVis) for data with inherent geometry (simulation/acquisition) and information visualization (InfoVis) for abstract data. Though these fields evolved in parallel, sharing journals and conferences, they had distinct challenges, methodologies, and experts. Recently, a visible transition has begun, with the two communities converging, exemplified by IEEE VIS conference removing distinct categories. 
In this context, we propose a high-level discussion on an open-source framework widely used in SciVis and how progressive processing and visualization could help bring its abilities to InfoVis.","accessible_pdf":false,"authors":[{"affiliations":["Kitware SAS, Lyon, France"],"email":"charles.gueunet@kitware.com","is_corresponding":true,"name":"Charles Gueunet"},{"affiliations":["Kitware Europe, Villeurbanne, France"],"email":"francois.mazen@kitware.com","is_corresponding":false,"name":"Fran\u00e7ois Mazen"}],"award":"","doi":"","event_id":"w-pdav","event_title":"Progressive Data Analysis and Visualization (PDAV) Workshop.","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-pdav-1009","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop4","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Progressive Data Analysis and Visualization (PDAV) Workshop","session_uid":"w-pdav","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Progressive Data Analysis and Visualization (PDAV) Workshop"],"time_stamp":"2024-10-14T12:30:00Z","title":"Towards a Progressive Open Source Framework for SciVis and InfoVis","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-pdav-1010","abstract":"Progressive dimensionality reduction algorithms allow for visually investigating intermediate results, especially for large data sets. While different algorithms exist that progressively increase the number of data points, we propose an algorithm that allows for increasing the number of dimensions. Especially in spatio-temporal data, where each spatial location can be seen as one data point and each time step as one dimension, the data is often stored in a format that supports quick access to the individual dimensions of all points. Therefore, we propose Progressive Glimmer, a progressive multidimensional scaling (MDS) algorithm. We adapt the Glimmer algorithm to support progressive updates for changes in the data's dimensionality. We evaluate Progressive Glimmer's embedding quality and runtime. We observe that the algorithm provides more stable results, leading to visually consistent results for progressive rendering and making the approach applicable to streaming data. 
We show the applicability of our approach to spatio-temporal simulation ensemble data where we add the individual ensemble members progressively.","accessible_pdf":false,"authors":[{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"m_ever14@uni-muenster.de","is_corresponding":true,"name":"Marina Evers"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"david.haegele@visus.uni-stuttgart.de","is_corresponding":false,"name":"David H\u00e4gele"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"st142532@stud.uni-stuttgart.de","is_corresponding":false,"name":"S\u00f6ren D\u00f6ring"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"weiskopf@visus.uni-stuttgart.de","is_corresponding":false,"name":"Daniel Weiskopf"}],"award":"","doi":"","event_id":"w-pdav","event_title":"Progressive Data Analysis and Visualization (PDAV) Workshop.","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-pdav-1010","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop4","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Progressive Data Analysis and Visualization (PDAV) Workshop","session_uid":"w-pdav","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Progressive Data Analysis and Visualization (PDAV) Workshop"],"time_stamp":"2024-10-14T12:30:00Z","title":"Progressive Glimmer: Expanding Dimensionality in Multidimensional Scaling","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1007","abstract":"Symmetric second-order tensors are fundamental in various scientific and engineering domains, as they can represent properties such as material stresses or diffusion processes in brain tissue. In recent years, several approaches have been introduced and improved to analyze these fields using topological features, such as degenerate tensor locations, i.e., the tensor has repeated eigenvalues, or normal surfaces. Traditionally, the identification of such features has been limited to single tensor fields. However, it has become common to create ensembles to account for uncertainties and variability in simulations and measurements. In this work, we explore novel methods for describing and visualizing degenerate tensor locations in 3D symmetric second-order tensor field ensembles. We base our considerations on the tensor mode and analyze its practicality in characterizing the uncertainty of degenerate tensor locations before proposing a variety of visualization strategies to effectively communicate degenerate tensor information. 
We demonstrate our techniques for synthetic and simulation data sets. The results indicate that the interplay of different descriptions for uncertainty can effectively convey information on degenerate tensor locations.","accessible_pdf":true,"authors":[{"affiliations":["University of Cologne, Cologne, Germany"],"email":"tadea.schmitz@uni-koeln.de","is_corresponding":false,"name":"Tadea Schmitz"},{"affiliations":["RWTH Aachen University, Aachen, Germany"],"email":"gerrits@vis.rwth-aachen.de","is_corresponding":true,"name":"Tim Gerrits"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1007","image_caption":"Uncertainty visualizations for eight simulation results describing stresses in an O-ring with varying anisotropy parameter. The degenerate tensor lines of all ensemble members are shown in green, while the color-coded meanLine shows the locations of degenerate tensors within the mean tensor field and standard deviation of mode values. The yellow probabilityBand indicates locations with a 25% probability of a mode value larger than or equal to 0.99.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2408.08099","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-uncertainty/w-uncertainty-1007/w-uncertainty-1007_Preview.mp4?token=8rfUf11SCsATf7UzC6-xDYLlbQCurLQyjEYw9YyHGNI&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"Fw4FjoRpBtE","session_youtube_ff_link":"https://youtu.be/Fw4FjoRpBtE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"Exploring Uncertainty Visualization for Degenerate Tensors in 3D Symmetric Second-Order Tensor Field Ensembles","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1017","abstract":"Uncertainty visualization is a key component in translating important insights from ensemble data into actionable decision-making by visually conveying various aspects of uncertainty within a system. With the recent advent of fast surrogate models for computationally expensive simulations, users can interact with more aspects of data spaces than ever before. However, the integration of ensemble data with surrogate models in a decision-making tool brings up new challenges for uncertainty visualization, namely how to reconcile and communicate the new and different types of uncertainties brought in by surrogates and how to utilize these new data estimates in actionable ways. 
In this work, we examine these issues as they relate to high-dimensional data visualization, the integration of discrete datasets and the continuous representations of those datasets, and the unique difficulties associated with systems that allow users to iterate between input and output spaces. We assess the role of uncertainty visualization in facilitating intuitive and actionable interaction with ensemble data and surrogate models, and highlight key challenges in this new frontier of computational simulation.","accessible_pdf":true,"authors":[{"affiliations":["National Renewable Energy Lab, Golden, United States"],"email":"sam.molnar@nrel.gov","is_corresponding":true,"name":"Sam Molnar"},{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"jd.laurencechasen@nrel.gov","is_corresponding":false,"name":"J.D. Laurence-Chasen"},{"affiliations":["The Ohio State University, Columbus, United States","National Renewable Energy Lab, Golden, United States"],"email":"duan.418@osu.edu","is_corresponding":false,"name":"Yuhan Duan"},{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"julie.bessac@nrel.gov","is_corresponding":false,"name":"Julie Bessac"},{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"kristi.potter@nrel.gov","is_corresponding":false,"name":"Kristi Potter"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1017","image_caption":"The relationship between ensemble datasets and surrogates. Parameters (left) and outputs (right) in solid rectangles represent realizations from an ensemble dataset. A forward surrogate (top) enables a user to propose novel parameter settings and predict output variables, along with quantified uncertainty relating to how close those predictions get to the original ensemble outputs. A reverse surrogate (bottom) allows the user to choose output values and determine possible input parameters that will get within a range of that proposed output. 
We assess the role of uncertainty visualization in facilitating intuitive and actionable interaction with ensemble data and surrogate models, and highlight key challenges in this new frontier of computational simulation.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"Uncertainty Visualization Challenges in Decision Systems with Ensemble Data & Surrogate Models","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1009","abstract":"Understanding and communicating data uncertainty is crucial for informed decision-making across various domains, including finance, healthcare, and public policy. This study investigates the impact of gender and acoustic variables on decision-making, confidence, and trust through a crowdsourced experiment. We compared visualization-only representations of uncertainty to text-forward and speech-forward bimodal representations, including multiple synthetic voices across gender. Speech-forward representations led to an increase in risky decisions, and text-forward representations led to lower confidence. Contrary to prior work, speech-forward forecasts did not receive higher ratings of trust. Higher normalized pitch led to a slight increase in decision confidence, but other voice characteristics had minimal impact on decisions and trust. An exploratory analysis of accented speech showed consistent results with the main experiment and additionally indicated lower trust ratings for information presented in Indian and Kenyan accents. The results underscore the importance of considering acoustic and contextual factors in presentation of data uncertainty.","accessible_pdf":false,"authors":[{"affiliations":["University of California Berkeley, Berkeley, United States"],"email":"chase_stokes@berkeley.edu","is_corresponding":true,"name":"Chase Stokes"},{"affiliations":["Stanford University, Stanford, United States"],"email":"sanker@stanford.edu","is_corresponding":false,"name":"Chelsea Sanker"},{"affiliations":["Versalytix, Columbus, United States"],"email":"bcogley@versalytix.com","is_corresponding":false,"name":"Bridget Cogley"},{"affiliations":["Tableau Research, Palo Alto, United States"],"email":"vsetlur@tableau.com","is_corresponding":false,"name":"Vidya Setlur"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1009","image_caption":"Example stimuli viewed by participants. (a) Visualization-only representation: a density plot showing the distribution of possible nighttime temperatures. 
(c) Speech-forward representation: contains the same density mark to provide some visual information, accompanied by an mp3 player which describes the distribution, temperature values, and likelihoods. We tested six different variants of these representations, with three masculine voices and three feminine voices. (c) Text-forward representation: contains the density mark and a text paragraph describing the distribution and likelihoods for different values. This is the same content as present in the speech forecast.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2408.08438","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-uncertainty/w-uncertainty-1009/w-uncertainty-1009_Preview.mp4?token=8Ya_rIZn7flqOlmKHILwBCvrxKdN1CX91FEvXi5cslk&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-uncertainty/w-uncertainty-1009/w-uncertainty-1009_Preview.srt?token=kKlx2RhluAJIjppd-_OxtbQUL-d_Ns5VrClhkPVXEA4&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"pWsB9XzF8uA","session_youtube_ff_link":"https://youtu.be/pWsB9XzF8uA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"Voicing Uncertainty: How Speech, Text, and Visualizations Influence Decisions with Data Uncertainty","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1018","abstract":"Although people frequently make decisions based on uncertain forecasts about future events, there is little guidance about how best to represent the uncertainty in forecasts. One common approach is to use multiple forecast visualizations, in which multiple forecasts are plotted on the same graph. This provides an implicit representation of the uncertainty in the data, but it is not clear how many forecasts to show, or how viewers might be influenced by seeing the more extreme forecasts rather than those closer to the mean. In this study, we showed participants forecasts of wind speed data and they made decisions based on their predictions about the future wind speed. We allowed participants to choose how many forecasts to view prior to making a decision, and we manipulated the ordering of the forecasts and the cost of each additional forecast. We found that participants viewed more forecasts when the outcome was more ambiguous. The order of the forecasts had little impact on their decisions when there was no cost for the additional information. However, when there was a cost for each forecast, the participants were much more likely to make a guess based on only the first forecast shown. 
In this case, showing one of the extreme forecasts first led to less optimal decisions.","accessible_pdf":true,"authors":[{"affiliations":["Sandia National Laboratories, Albuquerque, United States"],"email":"lematze@sandia.gov","is_corresponding":true,"name":"Laura Matzen"},{"affiliations":["Sandia National Laboratories, Albuquerque, United States"],"email":"mcstite@sandia.gov","is_corresponding":false,"name":"Mallory C Stites"},{"affiliations":["Sandia National Laboratories, Albuquerque, United States"],"email":"kmdivis@sandia.gov","is_corresponding":false,"name":"Kristin M Divis"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"abendeck3@gatech.edu","is_corresponding":false,"name":"Alexander Bendeck"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"john.stasko@cc.gatech.edu","is_corresponding":false,"name":"John Stasko"},{"affiliations":["Northeastern University, Boston, United States"],"email":"l.padilla@northeastern.edu","is_corresponding":false,"name":"Lace M. Padilla"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1018","image_caption":"In this experiment, participants made decisions based on wind speed forecasts shown in multiple forecast visualizations. They saw one forecast to start, but could add up to 19 more forecasts to the plot, one at a time, prior to making their decisions. We manipulated the risk of the situation (the percentage of forecasts crossing the critical threshold of 50 miles per hour), the order in which the first three forecasts in the set appeared, and the cost of obtaining additional forecasts. This figure shows examples of the stimuli, each displaying three forecasts, at different levels of the Percent Crossing manipulation.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://osf.io/preprints/osf/vhs7w/","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"Effects of Forecast Number, Order, and Cost in Multiple Forecast Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1015","abstract":"Functional depth is a well-known technique used to derive descriptive statistics (e.g., median, quartiles, and outliers) for 1D data. Surface boxplots extend this concept to ensembles of images, helping scientists and users identify representative and outlier images. 
However, the computational time for surface boxplots increases cubically with the number of ensemble members, making it impractical for integration into visualization tools.In this paper, we propose a deep-learning solution for efficient depth prediction and computation of surface boxplots for time-varying ensemble data. Our deep learning framework accurately predicts member depths in a surface boxplot, achieving average speedups of 6X on a CPU and 15X on a GPU for the 2D Red Sea dataset with 50 ensemble members compared to the traditional depth computation algorithm. Our approach achieves at least a 99\\% level of rank preservation, with order flipping occurring only at pairs with extremely similar depth values that pose no statistical differences. This local flipping does not significantly impact the overall depth order of the ensemble members.","accessible_pdf":true,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"mengjiao@sci.utah.edu","is_corresponding":true,"name":"Mengjiao Han"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"tushar.athawale@gmail.com","is_corresponding":false,"name":"Tushar M. Athawale"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"jixianli@sci.utah.edu","is_corresponding":false,"name":"Jixian Li"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"crj@sci.utah.edu","is_corresponding":false,"name":"Chris R. Johnson"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1015","image_caption":"Functional depth is a valuable technique for analyzing uncertainty of 1D data, and surface boxplots extend this concept to image ensembles, aiding in identifying representative and outlier images. However, the high computational cost limits their usability. This paper introduces a deep-learning framework for efficient surface boxplot computation in time-varying ensemble data. 
Our method accelerates depth prediction, achieving up to 15X speedups on a GPU while maintaining 99% rank preservation accuracy, making it a practical solution for integrating surface boxplots into visualization tools.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"Accelerated Depth Computation for Surface Boxplots with Deep Learning","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1012","abstract":"Uncertainty visualization is an emerging research topic in data visualization because neglecting uncertainty in visualization can lead to inaccurate assessments. In this short paper, we study the propagation of multivariate data uncertainty in visualization. Although there have been a few advancements in probabilistic uncertainty visualization of multivariate data, three critical challenges remain to be addressed. First, the state-of-the-art probabilistic uncertainty visualization framework is limited to bivariate data (two variables). Second, the existing uncertainty visualization algorithms use computationally intensive techniques and lack support for cross-platform portability. Third, as a consequence of the computational expense, integration into interactive production visualization tools is impractical. In this work, we address all three issues and make a threefold contribution. First, we generalize the state-of-the-art probabilistic framework for bivariate data to multivariate data with an arbitrary number of variables. Second, through utilization of VTK-m\u2019s shared-memory parallelism and cross-platform compatibility features, we demonstrate acceleration of multivariate uncertainty visualization on different many-core architectures, including OpenMP and AMD GPUs. Third, we demonstrate the integration of our algorithms with the ParaView software. 
We demonstrate utility of our algorithms through experiments on multivariate simulation data.","accessible_pdf":false,"authors":[{"affiliations":["Indiana University Bloomington, Bloomington, United States"],"email":"gautamhari@outlook.com","is_corresponding":true,"name":"Gautam Hari"},{"affiliations":["Indiana University Bloomington, Bloomington, United States"],"email":"nrushad2001@gmail.com","is_corresponding":false,"name":"Nrushad A Joshi"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"jay.wang@rutgers.edu","is_corresponding":false,"name":"Zhe Wang"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"gongq@ornl.gov","is_corresponding":false,"name":"Qian Gong"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"pugmire@ornl.gov","is_corresponding":false,"name":"David Pugmire"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"kmorel@acm.org","is_corresponding":false,"name":"Kenneth Moreland"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"crj@sci.utah.edu","is_corresponding":false,"name":"Chris R. Johnson"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"klasky@ornl.gov","is_corresponding":false,"name":"Scott Klasky"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"pnorbert@ornl.gov","is_corresponding":false,"name":"Norbert Podhorszki"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"tushar.athawale@gmail.com","is_corresponding":false,"name":"Tushar M. Athawale"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1012","image_caption":"A simulation of the Deep Water Impact. From Left to Right, the images are a) Original Dataset, b) Compressed data without uncertainty, and c) Compressed data with uncertainty. The colors of the Uncertainty image range from transparent deep purple regions that indicate positions of lower probability, whereas the less transparent bright yellow regions indicate positions of higher probability. Uncertainty visualization recovers key topological structures, such as the rib-like formations (e.g., rib-like structure in the inset views), which appear broken in traditional mean-field visualization. 
This probabilistic approach of uncertainty visualization allows for the recovery of potentially important features in uncertain data.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-uncertainty/w-uncertainty-1012/w-uncertainty-1012_Preview.mp4?token=fRsU_limNTdX9nAIOhU27Jqv9DG9Bf1P4Bu9Hk_CLCg&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-uncertainty/w-uncertainty-1012/w-uncertainty-1012_Preview.srt?token=CRT8diSvS6Vaelc4Nun96O8lftyDKhnf9SjCVXAyuog&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"QH7dbVdSO3I","session_youtube_ff_link":"https://youtu.be/QH7dbVdSO3I","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"FunM^2C: A Filter for Uncertainty Visualization of Multivariate Data on Multi-Core Devices","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1011","abstract":"Current research provides methods to communicate uncertainty and adapts classical algorithms of the visualization pipeline to take the uncertainty into account. Various existing visualization frameworks include methods to present uncertain data but do not offer transformation techniques tailored to uncertain data. Therefore, we propose a software package for uncertainty-aware data analysis in Python (UADAPy) offering methods for uncertain data along the visualization pipeline.We aim to provide a platform that is the foundation for further integration of uncertainty algorithms and visualizations. It provides common utility functionality to support research in uncertainty-aware visualization algorithms and makes state-of-the-art research results accessible to the end user. 
The project is available at https://github.com/UniStuttgart-VISUS/uadapy.","accessible_pdf":false,"authors":[{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"patrick.paetzold@uni-konstanz.de","is_corresponding":true,"name":"Patrick Paetzold"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"david.haegele@visus.uni-stuttgart.de","is_corresponding":false,"name":"David H\u00e4gele"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"m_ever14@uni-muenster.de","is_corresponding":false,"name":"Marina Evers"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"weiskopf@visus.uni-stuttgart.de","is_corresponding":false,"name":"Daniel Weiskopf"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"oliver.deussen@uni-konstanz.de","is_corresponding":false,"name":"Oliver Deussen"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1011","image_caption":"The UADAPy software package is a toolbox providing high-dimensional uncertain sample data sets, uncertainty-aware data transformations and analysis methods, and visualization methods tailored to show uni- and multivariate sets of probability distributions.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"UADAPy: An Uncertainty-Aware Visualization and Analysis Toolbox","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1014","abstract":"Isosurface visualization is fundamental for exploring and analyzing 3D volumetric data. Marching cubes (MC) algorithms with linear interpolation are commonly used for isosurface extraction and visualization.Although linear interpolation is easy to implement, it has limitations when the underlying data is complex and high-order, which is the case for most real-world data. Linear interpolation can output vertices at the wrong location. Its inability to deal with sharp features and features smaller than grid cells can create holes and broken pieces in the extracted isosurface. Despite these limitations, isosurface visualizations typically do not include insight into the spatial location and the magnitude of these errors. We utilize high-order interpolation methods with MC algorithms and interactive visualization to highlight these uncertainties. Our visualization tool helps identify the regions of high interpolation errors. It also allows users to query local areas for details and compare the differences between isosurfaces from different interpolation methods. 
In addition, we employ high-order methods to identify and reconstruct possible features that linear methods cannot detect.We showcase how our visualization tool helps explore and understand the extracted isosurface errors through synthetic and real-world data.","accessible_pdf":true,"authors":[{"affiliations":["Scientific Computing and Imaging Institute, Salk Lake City, United States"],"email":"touermi@sci.utah.edu","is_corresponding":true,"name":"Timbwaoga A. J. Ouermi"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"jixianli@sci.utah.edu","is_corresponding":false,"name":"Jixian Li"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"tushar.athawale@gmail.com","is_corresponding":false,"name":"Tushar M. Athawale"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"crj@sci.utah.edu","is_corresponding":false,"name":"Chris R. Johnson"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1014","image_caption":"Our proposed visualization system highlights errors introduced by linear interpolation methods and allows users to query local vertex differences between interpolation methods. The first column shows the approximated isosurface uncertainty and local selection using the colormap and transparent box, respectively. The second column shows the differences between linear and cubic, linear and WENO, and the approximated error for each vertex inside the transparent boxes. The third column shows a global comparison between linear and WENO. The fourth and fifth columns show a comparison between isosurfaces with (transparent orange) and without (opaque blue) possible hidden features that indicate isosurface feature uncertainty.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2409.00043","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"Estimation and Visualization of Isosurface Uncertainty from Linear and High-Order Interpolation Methods","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1010","abstract":"The increasing adoption of Deep Neural Networks (DNNs) has led to their application in many challenging scientific visualization tasks. While advanced DNNs offer impressive generalization capabilities, understanding factors such as model prediction quality, robustness, and uncertainty is crucial. These insights can enable domain scientists to make informed decisions about their data. 
However, DNNs inherently lack ability to estimate prediction uncertainty, necessitating new research to construct robust uncertainty-aware visualization techniques tailored for various visualization tasks. In this work, we propose uncertainty-aware implicit neural representations to model scalar field data sets effectively and comprehensively study the efficacy and benefits of estimated uncertainty information for volume visualization tasks. We evaluate the effectiveness of two principled deep uncertainty estimation techniques: (1) Deep Ensemble and (2) Monte Carlo Dropout (MCDropout). These techniques enable uncertainty-informed volume visualization in scalar field data sets. Our extensive exploration across multiple data sets demonstrates that uncertainty-aware models produce informative volume visualization results. Moreover, integrating prediction uncertainty enhances the trustworthiness of our DNN model, making it suitable for robustly analyzing and visualizing real-world scientific volumetric data sets.","accessible_pdf":false,"authors":[{"affiliations":["IIT kanpur , Kanpur , India"],"email":"saklanishanu@gmail.com","is_corresponding":false,"name":"Shanu Saklani"},{"affiliations":["Indian Institute of Technology Kanpur, Kanpur, India"],"email":"chitwangoel1010@gmail.com","is_corresponding":false,"name":"Chitwan Goel"},{"affiliations":["Indian Institute of Technology Kanpur, Kanpur, India"],"email":"shrey.bansal75@gmail.com","is_corresponding":false,"name":"Shrey Bansal"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"jay.wang@rutgers.edu","is_corresponding":false,"name":"Zhe Wang"},{"affiliations":["Indian Institute of Technology Kanpur (IIT Kanpur), Kanpur, India"],"email":"soumya.cvpr@gmail.com","is_corresponding":false,"name":"Soumya Dutta"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"tushar.athawale@gmail.com","is_corresponding":false,"name":"Tushar M. Athawale"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"pugmire@ornl.gov","is_corresponding":false,"name":"David Pugmire"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"crj@sci.utah.edu","is_corresponding":false,"name":"Chris R. Johnson"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1010","image_caption":"Showcasing how uncertainty-aware deep learning models produce informative and reliable volume rendering results. 
Furthermore, the results demonstrate how prediction uncertainty in volume rendering can be quantified and communicated to domain scientists, aiding in the interpretation of deep learning model-generated outcomes.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2408.06018","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"Uncertainty-Informed Volume Visualization using Implicit Neural Representation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1013","abstract":"Uncertainty is inherent to most data, including vector field data, yet it is often omitted in visualizations and representations. Effective uncertainty visualization can enhance the understanding and interpretability of vector field data. For instance, in the context of severe weather events such as hurricanes and wildfires, effective uncertainty visualization can provide crucial insights about fire spread or hurricane behavior and aid in resource management and risk mitigation. Glyphs are commonly used for representing vector uncertainty but are often limited to 2D. In this work, we present a glyph-based technique for accurately representing 3D vector uncertainty and a comprehensive framework for visualization, exploration, and analysis using our new glyphs. We employ hurricane and wildfire examples to demonstrate the efficacy of our glyph design and visualization tool in conveying vector field uncertainty.","accessible_pdf":true,"authors":[{"affiliations":["Scientific Computing and Imaging Institute, Salk Lake City, United States"],"email":"touermi@sci.utah.edu","is_corresponding":true,"name":"Timbwaoga A. J. Ouermi"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"jixianli@sci.utah.edu","is_corresponding":false,"name":"Jixian Li"},{"affiliations":["Sandia National Laboratories, Albuquerque, United States"],"email":"zbmorro@sandia.gov","is_corresponding":false,"name":"Zachary Morrow"},{"affiliations":["Sandia National Laboratories, Albuquerque, United States"],"email":"bartv@sandia.gov","is_corresponding":false,"name":"Bart van Bloemen Waanders"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"crj@sci.utah.edu","is_corresponding":false,"name":"Chris R. Johnson"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1013","image_caption":"3D vector uncertainty glyph. The glyphs' direction corresponds to the median vector direction. The cone glyph encodes angle variation and maximum vector length but omits magnitude variation. 
The comet glyph includes the magnitude variation and minimum magnitude. However, these variations are not easily discernible. While both the tailed-disc and squid distinguish these uncertainties, the small arrow size and rotational symmetry of the tailed-disc limit the perception. Our proposed squid glyph effectively distinguishes between magnitude and direction variations. Additionally, it employs superellipses (2D superquadrics) to better approximate directional variations, eliminate rotational ambiguity, and improve overall accuracy.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"http://arxiv.org/abs/2409.00042","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"Glyph-Based Uncertainty Visualization and Analysis of Time-Varying Vector Field","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1019","abstract":"We present a simple comparative framework for testing and developing uncertainty modeling in uncertain marching cubes implementations. The selection of a model to represent the probability distribution of uncertain values directly influences the memory use, run time, and accuracy of an uncertainty visualization algorithm. We use an entropy calculation directly on ensemble data to establish an expected result and then compare the entropy from various probability models, including uniform, Gaussian, histogram, and quantile models. Our results verify that models matching the distribution of the ensemble indeed match the entropy. We further show that fewer bins in nonparametric histogram models are more effective whereas large numbers of bins in quantile models approach data accuracy.","accessible_pdf":true,"authors":[{"affiliations":["University of Illinois Urbana-Champaign, Urbana, United States"],"email":"sisneros@illinois.edu","is_corresponding":true,"name":"Robert Sisneros"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"tushar.athawale@gmail.com","is_corresponding":false,"name":"Tushar M. Athawale"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"kmorel@acm.org","is_corresponding":false,"name":"Kenneth Moreland"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"pugmire@ornl.gov","is_corresponding":false,"name":"David Pugmire"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1019","image_caption":"Representative test/result from our framework (wind dataset ensemble created via random uniform noise). 
The entropy for the full distribution model closely matches the uniform distribution assumption (red boxes), and the minimum entropy with the Gaussian assumption may not always be the best representative.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"An Entropy-Based Test and Development Framework for Uncertainty Modeling in Level-Set Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1016","abstract":"Wildfire poses substantial risks to our health, environment, and economy. Studying wildfire is challenging due to its complex interaction with the atmosphere dynamics and the terrain. Researchers have employed ensemble simulations to study the relationship between variables and mitigate uncertainties in unpredictable initial conditions. However, many domain scientists are unaware of the advanced visualization tools available for conveying uncertainty. To bring some of these uncertainty visualization techniques to domain scientists, we build an interactive visualization system that utilizes a band-depth-based method that provides a statistical summary and visualization for fire front contours from the ensemble. We augment the visualization system with capabilities to study wildfires as a dynamic system. In this paper, we demonstrate how our system can support domain scientists in studying fire spread patterns, identifying outlier simulations, and navigating to interesting instances based on a summary of events.","accessible_pdf":false,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"jixianli@sci.utah.edu","is_corresponding":true,"name":"Jixian Li"},{"affiliations":["Scientific Computing and Imaging Institute, Salk Lake City, United States"],"email":"touermi@sci.utah.edu","is_corresponding":false,"name":"Timbwaoga A. J. Ouermi"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"crj@sci.utah.edu","is_corresponding":false,"name":"Chris R. Johnson"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1016","image_caption":"We introduce our interactive interface for visualizing uncertainties of ensemble wildfire simulations. Our interface uses the contour boxplot to summarize the trend and variations of fire spreading patterns. 
Our interface also supports transfer-function-based color and opacity mapping for visualizing scalar functions from wildfire simulations, glyph- and streamline-based wind visualization, temporal events summary, contour band depths, spatial query for the fire arrival time (red sphere in the terrain shows the query point)","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"Visualizing Uncertainties in Ensemble Wildfire Forecast Simulations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-storygenai-5237","abstract":"Communicating data insights in an accessible and engaging manner to a broader audience remains a significant challenge. To address this problem, we introduce the Emoji Encoder, a tool that generates a set of emoji recommendations for the field and category names appearing in a tabular dataset. The selected set of emoji encodings can be used to generate configurable unit charts that combine plain text and emojis as word-scale graphics. These charts can serve to contrast values across multiple quantitative fields for each row in the data or to communicate trends over time. Any resulting chart is simply a block of text characters, meaning that it can be directly copied into a text message or posted on a communication platform such as Slack or Teams. This work represents a step toward our larger goal of developing novel, fun, and succinct data storytelling experiences that engage those who do not identify as data analysts. 
Emoji-based unit charts can offer contextual cues related to the data at the center of a conversation on platforms where emoji-rich communication is typical.","accessible_pdf":false,"authors":[{"affiliations":["University of Waterloo, Waterloo, Canada","Tableau Research, Seattle, United States"],"email":"mbrehmer@uwaterloo.ca","is_corresponding":true,"name":"Matthew Brehmer"},{"affiliations":["Tableau Research, Palo Alto, United States"],"email":"vsetlur@tableau.com","is_corresponding":false,"name":"Vidya Setlur"},{"affiliations":["McGraw Hill, Seattle, United States","Tableau Software, Seattle, United States"],"email":"zoezoezoe.cc@gmail.com","is_corresponding":false,"name":"Zoe Zoe"},{"affiliations":["Northeastern University, Portland, United States"],"email":"m.correll@northeastern.edu","is_corresponding":false,"name":"Michael Correll"}],"award":"","doi":"","event_id":"w-storygenai","event_title":"Workshop on Data Storytelling in an Era of Generative AI","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-storygenai-5237","image_caption":"The EMOJI ENCODER is an interactive chart authoring interface for Tableau that generates emoji representations based on field names and the values of categorical fields. In this example, an emoji pictograph depicts flood risk values across the Netherlands along with the number and type of employees in each province, shown in a Slack Message, or, because emojis are simply Unicode Characters, in this caption:\ud83c\udfd9\ufe0f \ud83d\udd35 Drenthe \ud83c\udfd9\ufe0f \ud83d\udd35 Flevoland \ud83c\udfd9\ufe0f \ud83d\udd35 Friesland \ud83c\udfd9\ufe0f \ud83d\udd34 Gelderland \ud83c\udfd9\ufe0f \ud83d\udd35 Groningen \ud83c\udfd9\ufe0f \ud83d\udd35 Limburg \ud83c\udfd9\ufe0f \u26aa\ufe0f \ud83d\udc68\u200d\ud83d\udcbc \ud83d\udc68\u200d\ud83d\udcbc \ud83d\udc68\u200d\ud83d\udcbc North Brabant \ud83c\udfd9\ufe0f \ud83d\udd35 \ud83c\udfe2 \ud83c\udfe2 \ud83c\udfe2 \ud83c\udfe2 \ud83c\udfe2 North Holland \ud83c\udfd9\ufe0f \ud83d\udd35 Overijssel \ud83c\udfd9\ufe0f \ud83d\udd34 \ud83d\udc68\u200d\ud83d\udcbc \ud83d\udc68\u200d\ud83d\udcbc \ud83d\udc68\u200d\ud83d\udcbc \ud83d\udc68\u200d\ud83d\udcbc \ud83d\udc68\u200d\ud83d\udcbc \ud83d\udc68\u200d\ud83d\udcbc \ud83d\udc68\u200d\ud83d\udcbc \ud83d\udc68\u200d\ud83d\udcbc South Holland \ud83c\udfd9\ufe0f \u26aa\ufe0f \ud83d\udc68\u200d\ud83d\udcbc \ud83d\udc68\u200d\ud83d\udcbc Utrecht \ud83c\udfd9\ufe0f \ud83d\udd35 Zeeland ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://www.arxiv.org/abs/2408.13418","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop6","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Workshop on Data Storytelling in an Era of Generative AI","session_uid":"w-storygenai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Workshop on Data Storytelling in an Era of Generative AI"],"time_stamp":"2024-10-13T16:00:00Z","title":"The Data-Wink Ratio: Emoji Encoder for Generating Semantically-Resonant Unit Charts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-storygenai-6168","abstract":"Data-driven storytelling serves as a 
crucial bridge for communicating ideas in a persuasive way. However, the manual creation of data stories is a multifaceted, labor-intensive, and case-specific effort, limiting their broader application. As a result, automating the creation of data stories has emerged as a significant research thrust. Despite advances in Artificial Intelligence, the systematic generation of data stories remains challenging due to their hybrid nature: they must frame a perspective based on a seed idea in a top-down manner, similar to traditional storytelling, while coherently grounding insights of given evidence in a bottom-up fashion, akin to data analysis. These dual requirements necessitate precise constraints on the permissible space of a data story. In this viewpoint, we propose integrating constraints into the data story generation process. Defined upon the hierarchies of interpretation and articulation, constraints shape both narrations and illustrations to align with seed ideas and contextualized evidence. We identify the taxonomy and required functionalities of these constraints. Although constraints can be heterogeneous and latent, we explore the potential to represent them in a computation-friendly fashion via Domain-Specific Languages. We believe that leveraging constraints will balance the artistic and engineering aspects of data story generation.","accessible_pdf":true,"authors":[{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"yu.zhe.s.shi@gmail.com","is_corresponding":false,"name":"Yu-Zhe Shi"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"haotian.li@connect.ust.hk","is_corresponding":true,"name":"Haotian Li"},{"affiliations":["Peking University, Beijing, China"],"email":"ruanlecheng@whai.pku.edu.cn","is_corresponding":false,"name":"Lecheng Ruan"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"huamin@cse.ust.hk","is_corresponding":false,"name":"Huamin Qu"}],"award":"","doi":"","event_id":"w-storygenai","event_title":"Workshop on Data Storytelling in an Era of Generative AI","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-storygenai-6168","image_caption":"The architecture of data-driven storytelling with hierarchical constraints. We present intuitive illustrations of the representations with blocks (see Sec. 3.3). 
The colors highlighting textual narratives and visual illustrations are encoded according to their respective constraints.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop6","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Workshop on Data Storytelling in an Era of Generative AI","session_uid":"w-storygenai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Workshop on Data Storytelling in an Era of Generative AI"],"time_stamp":"2024-10-13T16:00:00Z","title":"Constraint representation towards precise data-driven storytelling","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-storygenai-7043","abstract":"Creating data stories from raw data is challenging due to humans\u2019 limited attention spans and the need for specialized skills. Recent advancements in large language models (LLMs) offer great opportunities to develop systems with autonomous agents to streamline the data storytelling workflow. Though multi-agent systems have benefits such as fully realizing LLM potentials with decomposed tasks for individual agents, designing such systems also faces challenges in task decomposition, performance optimization for sub-tasks, and workflow design. To better understand these issues, we develop Data Director, an LLM-based multi-agent system designed to automate the creation of animated data videos, a representative genre of data stories. Data Director interprets raw data, breaks down tasks, designs agent roles to make informed decisions automatically, and seamlessly integrates diverse components of data videos. A case study demonstrates Data Director\u2019s effectiveness in generating data videos. Throughout development, we have derived lessons learned from addressing challenges, guiding further advancements in autonomous agents for data storytelling. 
We also shed light on future directions for global optimization, human-in-the-loop design, and the application of advanced multi-modal LLMs.","accessible_pdf":false,"authors":[{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"lshenaj@connect.ust.hk","is_corresponding":true,"name":"Leixian Shen"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"haotian.li@connect.ust.hk","is_corresponding":false,"name":"Haotian Li"},{"affiliations":["Microsoft, Beijing, China"],"email":"yunvvang@gmail.com","is_corresponding":false,"name":"Yun Wang"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"huamin@cse.ust.hk","is_corresponding":false,"name":"Huamin Qu"}],"award":"","doi":"","event_id":"w-storygenai","event_title":"Workshop on Data Storytelling in an Era of Generative AI","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-storygenai-7043","image_caption":"Architecture of Data Director, which is an LLM-based multi-agent system for automatic animated data video creation.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop6","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Workshop on Data Storytelling in an Era of Generative AI","session_uid":"w-storygenai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Workshop on Data Storytelling in an Era of Generative AI"],"time_stamp":"2024-10-13T16:00:00Z","title":"From Data to Story: Towards Automatic Animated Data Video Creation with LLM-based Multi-Agent Systems","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-storygenai-7072","abstract":"Crafting accurate and insightful narratives from data visualization is essential in data storytelling. Like creative writing, where one reads to write a story, data professionals must effectively ``read\" visualizations to create compelling data stories. In education, helping students develop these skills can be achieved through exercises that ask them to create narratives from data plots, demonstrating both ``show\" (describing the plot) and ``tell\" (interpreting the plot). Providing formative feedback on these exercises is crucial but challenging in large-scale educational settings with limited resources. This study explores using GPT-4o, a multimodal LLM, to generate and evaluate narratives from data plots. The LLM was tested in zero-shot, one-shot, and two-shot scenarios, generating narratives and self-evaluating their depth. Human experts also assessed the LLM's outputs. Additionally, the study developed machine learning and LLM-based models to assess student-generated narratives using LLM-generated data. Human experts validated a subset of these machine assessments. 
The findings highlight the potential of LLMs to support scalable formative assessment in teaching data storytelling skills, which has important implications for AI-supported educational interventions.","accessible_pdf":false,"authors":[{"affiliations":["University of Maryland Baltimore County, Baltimore, United States"],"email":"narens1@umbc.edu","is_corresponding":true,"name":"Naren Sivakumar"},{"affiliations":["University of Maryland, Baltimore County, Baltimore, United States"],"email":"lujiec@umbc.edu","is_corresponding":false,"name":"Lujie Karen Chen"},{"affiliations":["University of Maryland,Baltimore County, Baltimore, United States"],"email":"io11937@umbc.edu","is_corresponding":false,"name":"Pravalika Papasani"},{"affiliations":["University of maryland baltimore county, Hanover, United States"],"email":"vignam1@umbc.edu","is_corresponding":false,"name":"Vigna Majmundar"},{"affiliations":["Towson University, Towson, United States"],"email":"jfeng@towson.edu","is_corresponding":false,"name":"Jinjuan Heidi Feng"},{"affiliations":["SRI International, Menlo Park, United States"],"email":"louise.yarnall@sri.com","is_corresponding":false,"name":"Louise Yarnall"},{"affiliations":["University of Alabama, Tuscaloosa, United States"],"email":"jgong@umbc.edu","is_corresponding":false,"name":"Jiaqi Gong"}],"award":"","doi":"","event_id":"w-storygenai","event_title":"Workshop on Data Storytelling in an Era of Generative AI","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-storygenai-7072","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop6","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Workshop on Data Storytelling in an Era of Generative AI","session_uid":"w-storygenai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Workshop on Data Storytelling in an Era of Generative AI"],"time_stamp":"2024-10-13T16:00:00Z","title":"Show and Tell: Exploring Large Language Model\u2019s Potential inFormative Educational Assessment of Data Stories","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-accessible-1009","abstract":"The introduction of novel visualizations through animated transitions is a well-established practice in visualization research. In our preliminary exploratory study, we investigate whether this approach could effectively facilitate the introduction of new visualization types to blind and low-vision (BLV) individuals. Specifically, we present two approaches, direct and gradual, to a user who is blind and compare their potential usefulness. The direct approach involved a single, comprehensive description of the visual elements, while the gradual approach utilized a series of visualizations and transitions, starting from familiar visualization types known to the user and progressing to the final, novel visualization. We introduce two genomics visualizations, sequence logos and Circos plots, to the user with descriptions and then ask them to sketch the visualizations to reflect their understanding of the visual elements. 
Feedback from the user indicates that the gradual approach was easier to follow, suggesting that BLV individuals could benefit more from this method. We outline our design process and insights from the study, and highlight key considerations for future research directions.","accessible_pdf":true,"authors":[{"affiliations":["Harvard Medical School, Boston, United States"],"email":"tsmits@hms.harvard.edu","is_corresponding":true,"name":"Thomas C. Smits"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"sehi_lyi@hms.harvard.edu","is_corresponding":false,"name":"Sehi L'Yi"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"huyen_nguyen@hms.harvard.edu","is_corresponding":false,"name":"Huyen N. Nguyen"},{"affiliations":["University of California, Berkeley, United States","Harvard Medical School, Boston, United States"],"email":"apmar@berkeley.edu","is_corresponding":false,"name":"Andrew P Mar"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"nils@hms.harvard.edu","is_corresponding":false,"name":"Nils Gehlenborg"}],"award":"","doi":"","event_id":"w-accessible","event_title":"1st Workshop on Accessible Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-accessible-1009","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://osf.io/preprints/osf/v7mxz","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-accessible/w-accessible-1009/w-accessible-1009_Preview.mp4?token=zl41wAq6_2TYIJKHqSkWTfw9TnIr_UEbl7kig20VTVQ&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"1st Workshop on Accessible Data Visualization","session_uid":"w-accessible","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["1st Workshop on Accessible Data Visualization"],"time_stamp":"2024-10-13T12:30:00Z","title":"Explaining Unfamiliar Genomics Data Visualizations to a Blind Individual through Transitions","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-accessible-1011","abstract":"Content on the internet is often not accessible to all users. In particular with data visualizations, blind and visually impaired people face the problem that the presented data is either impossible or very difficult to access with the help of a screen reader. The aim of this paper is to develop a concept that enables screen reader users to explore online data visualizations. The concept should enable users to gain a comprehensive overview of the data and search for specific data items. In addition, sonification is integrated to help users understand the data. 
A user study with five non-sighted participants provides insight into how data visualizations can be explored with the help of the prototype.","accessible_pdf":true,"authors":[{"affiliations":["School of Informatics, Communications and Media, Hagenberg im M\u00fchlkreis, Austria"],"email":"s2110745013@students.fh-hagenberg.at","is_corresponding":false,"name":"Julia Loitzenbauer MSc"},{"affiliations":["University of Applied Sciences Upper Austria, Hagenberg im M\u00fchlkreis, Austria"],"email":"mandy.keck@fh-hagenberg.at","is_corresponding":true,"name":"Mandy Keck"}],"award":"","doi":"","event_id":"w-accessible","event_title":"1st Workshop on Accessible Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-accessible-1011","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-accessible/w-accessible-1011/w-accessible-1011_Preview.mp4?token=UDUelzgQWxbtdbilMXKHVfJGRx8XObrEjCMjZXXUB0U&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"1st Workshop on Accessible Data Visualization","session_uid":"w-accessible","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["1st Workshop on Accessible Data Visualization"],"time_stamp":"2024-10-13T12:30:00Z","title":"A Screen reader and Sonifcation Approach for non-sighted Users to explore Data Visualizations on the Internet","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-accessible-1012","abstract":"Embedded information displays (EIDs) are becoming increasingly ubiquitous on home appliances and devices such as microwaves, coffee machines, fridges, or digital thermostats. These displays are often multi-purpose, functioning as interfaces for selecting device settings, communicating operating status using simple visualizations, and displaying notifications. However, their usability for people in the late adulthood (PLA) development stage is not well-understood. We report on two focus groups with PLA (n=11, ages 76-94) from a local retirement community. Participants were shown images of everyday home electronics and appliances, answering questions about their experiences using the EIDs. Using open coding, we qualitatively analyzed their comments to distill key themes regarding how EIDs can negatively affect PLA's ability to take in information (e.g., poor labels) and interact with these devices (e.g., unintuitive steps) alongside strategies employed to work around these issues. We argue that understanding the equitable design and communication of devices' functions, operating status, and messages is important for future information display designers. 
We hope this work stimulates further investigation into more equitable EID design.","accessible_pdf":true,"authors":[{"affiliations":["University of Massachusetts Amherst, Amherst, United States"],"email":"zwhile@cs.umass.edu","is_corresponding":true,"name":"Zack While"},{"affiliations":["University of Massachusetts Amherst, Amherst, United States"],"email":"hwheelerklai@umass.edu","is_corresponding":false,"name":"Henry Wheeler-Klainberg"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"research@blascheck.eu","is_corresponding":false,"name":"Tanja Blascheck"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"petra.isenberg@inria.fr","is_corresponding":false,"name":"Petra Isenberg"},{"affiliations":["University of Massachusetts Amherst, Amherst, United States"],"email":"asarv@cs.umass.edu","is_corresponding":false,"name":"Ali Sarvghad"}],"award":"","doi":"","event_id":"w-accessible","event_title":"1st Workshop on Accessible Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-accessible-1012","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2410.03929","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"1st Workshop on Accessible Data Visualization","session_uid":"w-accessible","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["1st Workshop on Accessible Data Visualization"],"time_stamp":"2024-10-13T12:30:00Z","title":"Toward Understanding the Experiences of People in Late Adulthood with Embedded Information Displays in the Home","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-accessible-1013","abstract":"Blind and visually impaired people are often excluded from the analysis of datasets because data visualizations primarily address the visual channel. For this reason, this paper examines different physical and tactile encodings for preparing datasets for non-sighted users. Using a user-centered design approach, the authors investigate how this target group perceive visualizations tactilely and to what extent different encodings are suitable for exploring different datasets. Furthermore, it will be investigated how tactile contextual components such as labels, legends, grids and guidelines must be designed so that the information can be interpreted as accurately as possible. 
A user study with five blind participants provided valuable insights for the design of tactile data physicalizations.","accessible_pdf":true,"authors":[{"affiliations":["School of Informatics, Communications and Media, Hagenberg im M\u00fchlkreis, Austria"],"email":"s2210631004@students.fh-hagenberg.at","is_corresponding":false,"name":"Julian Ebermann"},{"affiliations":["University of Applied Sciences Upper Austria, Hagenberg im M\u00fchlkreis, Austria"],"email":"mandy.keck@fh-hagenberg.at","is_corresponding":true,"name":"Mandy Keck"}],"award":"","doi":"","event_id":"w-accessible","event_title":"1st Workshop on Accessible Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-accessible-1013","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-accessible/w-accessible-1013/w-accessible-1013_Preview.mp4?token=6pWsd2682UVONlb44eOHQ1e-4KzPr1bQrNxBU9s5Dec&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"1st Workshop on Accessible Data Visualization","session_uid":"w-accessible","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["1st Workshop on Accessible Data Visualization"],"time_stamp":"2024-10-13T12:30:00Z","title":"From Sight to Touch: Designing Tactile Data Physicalizations for Non-sighted Users","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-accessible-1014","abstract":"AChart is a suite of open-source web-based tools written in TypeScript with Node.js to create and interpret semantically-enriched SVG-based accessible charts. AChart Creator is a command-line tool which generates accessible SVG charts from CSV files using the D3 framework, by injecting ARIA roles and properties from the AChart taxonomy. AChart Interpreter is a client-side web application and executable package which interprets such a semantically-enriched SVG chart and displays side-by-side graphical and textual versions of the chart. It can read out the chart using synthetic speech and its user interface is screen reader compatible. It can be used both by blind users to gain an understanding of a chart, as well as by developers and chart authors to verify and validate the accessibility markup of an SVG chart. AChart Summariser is a command-line tool which interprets an accessible SVG chart and outputs a textual summary of the chart. AChart currently supports bar charts, line charts, and pie charts.","accessible_pdf":true,"authors":[{"affiliations":["Graz University of Technology, Graz, Austria"],"email":"kandrews@iicm.edu","is_corresponding":true,"name":"Keith Andrews"},{"affiliations":["Graz University of Technology, Graz, Austria"],"email":"chr.kopel@gmail.com","is_corresponding":false,"name":"Christopher Alexander Kopel"}],"award":"","doi":"","event_id":"w-accessible","event_title":"1st Workshop on Accessible Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-accessible-1014","image_caption":"AChart Interpreter showing an accessible multi-line chart. 
The user has navigated to the third data point of Data Series 1.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"1st Workshop on Accessible Data Visualization","session_uid":"w-accessible","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["1st Workshop on Accessible Data Visualization"],"time_stamp":"2024-10-13T12:30:00Z","title":"Accessible SVG Charts with AChart","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-accessible-1015","abstract":"Data visualizations are typically not accessible to blind and low-vision users. The most widely used remedy for making data visualizations accessible is text descriptions. Yet, manually creating useful text descriptions is often omitted by visualization authors, either because of a lack of awareness or a perceived burden. Automatically generated text descriptions are a potential partial remedy. However, with current methods it is unfeasible to create text descriptions for complex scientific charts. In this paper, we describe our methods for generating text descriptions for one complex scientific visualization: the UpSet plot. UpSet is a widely used technique for the visualization and analysis of sets and their intersections. At the same time, UpSet is arguably unfamiliar to novices and used mostly in scientific contexts. Generating text descriptions for UpSet plots is challenging because the patterns observed in UpSet plots have not been studied. We first analyze patterns present in dozens of published UpSet plots. We then introduce software that generates text descriptions for UpSet plots based on the patterns present in the chart. 
Finally, we introduce a web service that generates text descriptions based on a specification of an UpSet plot, and demonstrate its use in both an interactive web-based implementation and a static Python implementation of UpSet.","accessible_pdf":false,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"ishratjahan.eliza@utah.edu","is_corresponding":true,"name":"Ishrat Jahan Eliza"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"jakew@sci.utah.edu","is_corresponding":false,"name":"Jake Wagoner"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"jwilburn@sci.utah.edu","is_corresponding":false,"name":"Jack Wilburn"},{"affiliations":["Scientific Computing and Imaging Institute, Salt Lake City, United States"],"email":"natelanzadevelopment@gmail.com","is_corresponding":false,"name":"Nate Lanza"},{"affiliations":["University College London, London, United Kingdom"],"email":"d.hajas@ucl.ac.uk","is_corresponding":false,"name":"Daniel Hajas"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"alex@sci.utah.edu","is_corresponding":false,"name":"Alexander Lex"}],"award":"","doi":"","event_id":"w-accessible","event_title":"1st Workshop on Accessible Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-accessible-1015","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"1st Workshop on Accessible Data Visualization","session_uid":"w-accessible","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["1st Workshop on Accessible Data Visualization"],"time_stamp":"2024-10-13T12:30:00Z","title":"Accessible Text Descriptions for UpSet Plots","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-accessible-1024","abstract":"Many data visualization tools require a mouse. While such tools widen access to data communication and expression, their implementations are difficult or impossible to use by people with certain disabilities who experience difficulties using a mouse. What if people could use them as easily with a keyboard? OpenKeyNav is a zero-dependency JavaScript code library that exposes a developer-friendly API for initiating keyboard accessibility enhancements. We demonstrate a usage scenario of OpenKeyNav for improving the keyboard-accessibility of Voyager 2, an open-source web-based data visualization tool based on the shelf configuration similar to industry-leading Tableau. Since mouse-driven interactions such as drag-and-drop are found in software in a broad range of industries, the interaction methods we describe have potential implications for the education, employment, and autonomy of people with motor disabilities in various fields. A demonstration is at https://voyager-keyboard-demo.github.io/. 
Its instructions are at https://github.com/voyager-keyboard-demo/voyager-keyboard-demo.github.io/","accessible_pdf":false,"authors":[{"affiliations":["Harvard Medical School, Boston, United States"],"email":"lawrence_weru@hms.harvard.edu","is_corresponding":true,"name":"Lawrence Weru"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"sehi_lyi@hms.harvard.edu","is_corresponding":false,"name":"Sehi L'Yi"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"t.smits@hs-mannheim.de","is_corresponding":false,"name":"Thomas C. Smits"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"nils@hms.harvard.edu","is_corresponding":false,"name":"Nils Gehlenborg"}],"award":"","doi":"","event_id":"w-accessible","event_title":"1st Workshop on Accessible Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-accessible-1024","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-accessible/w-accessible-1024/w-accessible-1024_Preview.mp4?token=8lGsNR-yNTYFMwXgOKmP4pfhkvVnr109s4zTcvwsE_M&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-accessible/w-accessible-1024/w-accessible-1024_Preview.srt?token=CRAl72_NyqaVFwJ6c3wj9h3uLdf_MXP0nOQPALAGHDU&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"1st Workshop on Accessible Data Visualization","session_uid":"w-accessible","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["1st Workshop on Accessible Data Visualization"],"time_stamp":"2024-10-13T12:30:00Z","title":"Using OpenKeyNav to Enhance the Keyboard-Accessibility of Web-based Data Visualization Tools","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-future-1007","abstract":"Data physicalizations are a time-tested practice for visualizing data, but the sustainability challenges of current physicalization practices have only recently been explored; for example, the usage of carbon-intensive, non-renewable materials like plastic and metal.This work explores clay physicalizations as an approach to these challenges. Using a three-stage process, we investigate the design and sustainability of clay 3D printed physicalizations: 1) exploring the properties and constraints of clay when extruded through a 3D printer, 2) testing a variety of data encodings that work within the constraints, and 3) introducing Rain Gauge, a clay physicalization exploring climate effects on climate data with an impermanent material. Throughout our process, we investigate the material circularity of clay-based digital fabrication by reclaiming and reusing the clay stock in each stage. 
Finally, we reflect on the implications of ceramic 3D printing for data physicalization through the lenses of practicality and sustainability.","accessible_pdf":true,"authors":[{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"bridger.g.herman@gmail.com","is_corresponding":true,"name":"Bridger Herman"},{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"jlrossi@umn.edu","is_corresponding":false,"name":"Jessica Rossi-Mastracci"},{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"will1070@umn.edu","is_corresponding":false,"name":"Heather Willy"},{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"mreicher@umn.edu","is_corresponding":false,"name":"Molly Reichert"},{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"dfk@umn.edu","is_corresponding":false,"name":"Daniel F. Keefe"}],"award":"","doi":"","event_id":"w-future","event_title":"VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-future-1007","image_caption":"Rain Gauge is a clay data physicalization depicting monthly precipitation data from 1944-2024 on a cylindrical surface. Left panel: monthly precipitation in Minneapolis, MN, USA is encoded as line length outward from the surface. Middle panel: the printing process uses a 3D PotterBot 10 Pro ceramic 3D printer. Right panel: the Rain Gauge was set outside in the rain to explore environment-driven unmaking with the clay material. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://osf.io/preprints/osf/3nyrq","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop8","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation","session_uid":"w-future","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation"],"time_stamp":"2024-10-14T16:00:00Z","title":"Rain Gauge: Exploring the Design and Sustainability of 3D Printed Clay Physicalizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-future-1008","abstract":"We explain our model of data-in-a-void and contrast it with the idea of data-voids to explore how the different framings impact our thinking on sustainability. This contrast supports our assertion that how we think about the data that we work with for visualization design impacts the direction of our thinking and our work. To show this we describe how we view the concept of data-in-a-void as different from that of data-voids. Then we provide two examples, one that relates to existing data about bicycle mobility, and one about non-data for local food production. 
In the discussion, we then untangle and outline how our thinking about data for sustainability is impacted and influenced by the data-in-a-void model.","accessible_pdf":false,"authors":[{"affiliations":["University of Calgary, Calgary, Canada"],"email":"karly.ross@ucalgary.ca","is_corresponding":true,"name":"Karly Ross"},{"affiliations":["University of Calgary, Calgary, Canada"],"email":"pratim.sengupta@ucalgary.ca","is_corresponding":false,"name":"Pratim Sengupta"},{"affiliations":["University of Calgary, Calgary, Canada"],"email":"wj@wjwillett.net","is_corresponding":false,"name":"Wesley Willett"}],"award":"","doi":"","event_id":"w-future","event_title":"VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-future-1008","image_caption":"We compare two models of how we think about data to inform our visualization process. Left shows an abstracted data set with the areas with no data blanked out in grey. This model has many voids, but all within the existing data structure. On the right, a tiny speck of white is in a void. This speck indicates all the data that is collected in what we perceive to be an infinite field of all the data that could be collected. We use this second model to think about new possibilities in data visualization practices.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop8","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation","session_uid":"w-future","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation"],"time_stamp":"2024-10-14T16:00:00Z","title":"(Almost) All Data is Absent Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-future-1011","abstract":"This study explores energy issues across various nations, focusing on sustainable energy availability and accessibility. Representatives from all continents were selected based on their HDI values. Data from Kaggle, spanning 2000-2020, was analyzed using Python to address questions on electricity access, renewable energy generation, and fossil fuel consumption. The research employed statistical and data visualization techniques to reveal trends and disparities. Findings underscore the importance of Python and Kaggle in data analysis. 
The study suggests expanding datasets and incorporating predictive modeling for future research to enhance understanding and decision-making in energy policies.","accessible_pdf":false,"authors":[{"affiliations":["Faculdade Nova Roma, Recife, Brazil"],"email":"gustavodssilva456@gmail.com","is_corresponding":true,"name":"Gustavo Santos Silva"},{"affiliations":["Faculdade Nova Roma, Recife, Brazil"],"email":"lartur671@gmail.com","is_corresponding":false,"name":"Artur Vin\u00edcius Lima Silva"},{"affiliations":["Faculdade Nova Roma, Recife, Brazil"],"email":"lpsouza612@gmail.com","is_corresponding":false,"name":"Lucas Pereira Souza"},{"affiliations":["Faculdade Nova Roma, Recife, Brazil"],"email":"adrianlauzid@gmail.com","is_corresponding":false,"name":"Adrian Lauzid"},{"affiliations":["Universidade Federal de Pernambuco, Recife, Brazil"],"email":"djmm@cin.ufpe.br","is_corresponding":false,"name":"Davi Maia"}],"award":"","doi":"","event_id":"w-future","event_title":"VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-future-1011","image_caption":"This study uses Python and open data from Kaggle to visualize renewable energy generation and fossil fuel consumption from 2000-2020 across diverse nations. The research reveals global trends, disparities in energy access, and the role of data in driving sustainable energy solutions. Our findings contribute to shaping energy policy and decision-making for a more sustainable future.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop8","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation","session_uid":"w-future","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation"],"time_stamp":"2024-10-14T16:00:00Z","title":"Renewable Energy Data Visualization: A study with Open Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-future-1012","abstract":"Information visualization holds significant potential to support sustainability goals such as environmental stewardship and climate resilience by transforming complex data into accessible visual formats that enhance public understanding of complex climate change data and drive actionable insights. While the field has predominantly focused on the analytical orientation of visualization, ``critical visualization'' research challenges traditional visualization techniques and goals, expanding existing assumptions and conventions in the field. In this paper, I explore how reimagining overlooked aspects of data visualization\u2014such as engagement, emotional resonance, communication, and community empowerment\u2014can contribute to achieving sustainability objectives. 
I argue that by focusing on inclusive data visualization that promotes clarity, understandability, and public participation, we can make complex data more relatable and actionable, fostering broader connections and mobilizing collective action on critical issues like climate change. Moreover, I discuss the role of emotional receptivity in environmental data communication, stressing the need for visualizations that respect diverse cultural perspectives and emotional responses to achieve impactful outcomes. Drawing on insights from a decade of research in public participation and community engagement, I aim to highlight how data visualization can democratize data access and increase public involvement in order to contribute to a more sustainable and resilient future.","accessible_pdf":false,"authors":[{"affiliations":["University of Massachusetts Amherst, Amherst, United States"],"email":"nmahyar@cs.umass.edu","is_corresponding":true,"name":"Narges Mahyar"}],"award":"","doi":"","event_id":"w-future","event_title":"VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-future-1012","image_caption":"This figure represents the paper's key points, including (1) a review of emerging visualization theories that prioritize community engagement and social aspects, (2) dimensions for fostering community engagement, and (3) leveraging insights from fields such as public participation, participatory design, and communication studies to inform new theory development. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop8","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation","session_uid":"w-future","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation"],"time_stamp":"2024-10-14T16:00:00Z","title":"Reimagining Data Visualization to Address Sustainability Goals","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-future-1013","abstract":"This position paper discusses the role of data visualizations in journalism based on new areas of study such as visual journalism and data journalism, using examples from the coverage of the catastrophe that occurred in 2024 in Rio Grande do Sul, Brazil, affecting over 2 million people. This case served as a warning to the country about the importance of the climate change agenda and its consequences. 
The paper includes a literature review in the fields of journalism, data visualization, and psychology to explore the importance of data visualization in combating misinformation and in producing more reliable journalism as a tool for fighting climate change.","accessible_pdf":false,"authors":[{"affiliations":["Universidade Federal de Pernambuco, Recife, Brazil"],"email":"emilly.brito@ufpe.br","is_corresponding":true,"name":"Emilly Brito"},{"affiliations":["Universidade Federal de Pernambuco, Recife, Brazil"],"email":"nivan@cin.ufpe.br","is_corresponding":false,"name":"Nivan Ferreira"}],"award":"","doi":"","event_id":"w-future","event_title":"VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-future-1013","image_caption":"Data visualization example produced in the Brazilian Media about the catastrophe in Rio Grande do Sul.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop8","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation","session_uid":"w-future","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation"],"time_stamp":"2024-10-14T16:00:00Z","title":"Visual and Data Journalism as Tools for Fighting Climate Change","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1008","abstract":"Presenting the effects of and effective countermeasures for climate change is a significant challenge in science communication. Data-driven storytelling and narrative visualization can be part of the solution. However, the communication is limited when restricted to global or cross-regional scales, as climate effects are particular to the location and adaptations need to be local. In this work, we focus on data-driven storytelling that communicates local impacts of climate change. We analyze the adoption of data-driven storytelling by local news media in addressing climate-related topics. Further, we investigate the specific characteristics of the local scenario and present three application examples to showcase potential local data-driven stories. Since these examples are rooted in university teaching, we also discuss educational aspects. 
Finally, we summarize the interdisciplinary research challenges and opportunities for application associated with data-driven storytelling in a local context.","accessible_pdf":false,"authors":[{"affiliations":["University of Bamberg, Bamberg, Germany"],"email":"fabian.beck@uni-bamberg.de","is_corresponding":true,"name":"Fabian Beck"},{"affiliations":["University of Bamberg, Bamberg, Germany"],"email":"lukas.panzer@uni-bamberg.de","is_corresponding":false,"name":"Lukas Panzer"},{"affiliations":["University of Bamberg, Bamberg, Germany"],"email":"marc.redepenning@uni-bamberg.de","is_corresponding":false,"name":"Marc Redepenning"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-vis4climate-1008","image_caption":"The figure illustrates the characteristics of local climate data stories, focusing on how data-driven storytelling can communicate the effects and mitigation of climate change in a localized context. It shows the relationships between climate change, locality, data, and citizens through key characteristics of the scenario. The characteristics emphasize that specific local relevance, limited scope, local context, and participation are linked with the input data. The data stories support stakeholder engagement through familiarity, interest, concern, participation, and ultimately actionable conclusions for citizens.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Local Climate Data Stories: Data-driven Storytelling to Communicate Effects and Mitigation of Climate Change in a Local Context","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1011","abstract":"Climate change\u2019s global impact calls for coordinated visualization efforts to enhance collaboration and communication among key partners such as domain experts, community members, and policy makers. We present a collaborative initiative, EcoViz, where visualization practitioners and key partners co-designed environmental data visualizations to illustrate impacts on ecosystems and the benefit of informed management and nature-based solutions. Our three use cases rely on unique processing pipelines to represent time-dependent natural phenomena by combining cinematic, scientific, and information visualization methods. Scientific outputs are displayed through narrative data-driven animations, interactive geospatial web applications, and immersive Unreal Engine applications. Each field\u2019s decision-making process is specific, driving design decisions about the best representation and medium for each use case. 
Data-driven cinematic videos with simple charts and minimal annotations proved most effective for engaging large, diverse audiences. This flexible medium facilitates reuse, maintains critical details, and integrates well into broader narrative videos. The need for interdisciplinary visualizations highlights the importance of funding to integrate visualization practitioners throughout the scientific process to better translate data and knowledge into informed policy and practice.","accessible_pdf":false,"authors":[{"affiliations":["University of California, San Diego, San Diego, United States"],"email":"jkb@ucsc.edu","is_corresponding":true,"name":"Jessica Marielle Kendall-Bar"},{"affiliations":["University of California, San Diego, La Jolla, United States"],"email":"inealey@ucsd.edu","is_corresponding":false,"name":"Isaac Nealey"},{"affiliations":["University of California, Santa Cruz, Santa Cruz, United States"],"email":"icostell@ucsc.edu","is_corresponding":false,"name":"Ian Costello"},{"affiliations":["University of California, Santa Cruz, Santa Cruz, United States"],"email":"chlowrie@ucsc.edu","is_corresponding":false,"name":"Christopher Lowrie"},{"affiliations":["University of California, San Diego, San Diego, United States"],"email":"khn009@ucsd.edu","is_corresponding":false,"name":"Kevin Huynh Nguyen"},{"affiliations":["University of California San Diego, La Jolla, United States"],"email":"pponganis@ucsd.edu","is_corresponding":false,"name":"Paul J. Ponganis"},{"affiliations":["University of California, Santa Cruz, Santa Cruz, United States"],"email":"mwbeck@ucsc.edu","is_corresponding":false,"name":"Michael W. Beck"},{"affiliations":["University of California, San Diego, San Diego, United States"],"email":"ialtintas@ucsd.edu","is_corresponding":false,"name":"\u0130lkay Alt\u0131nta\u015f"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-vis4climate-1011","image_caption":"Graphic showing the three use cases for EcoViz, a collaborative initiative to co-design multimodal environmental data visualizations. Above, we show an immersive Unreal Engine visualization of a controlled burn simulation to manage wildfire. Below, we show a photo-realistic rendering of hydrodynamic model outputs regarding the flood protection benefits of coral reefs. The circular graphic in the center shows thousands of autonomous profiling Argo floats that survey changes in temperature and salinity to track heat accumulation in the ocean. 
","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-vis4climate/w-vis4climate-1011/w-vis4climate-1011_Preview.mp4?token=XM88vdwuEwLEUhn8-gWWbE6qIZuzh0fLBtKc3HP1dLg&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-vis4climate/w-vis4climate-1011/w-vis4climate-1011_Preview.srt?token=6p9mlLEDy2QDkEGnKm78tvINdyMMpm4ZxOhMHEE59h4&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"uX7XpQo2VGs","session_youtube_ff_link":"https://youtu.be/uX7XpQo2VGs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"EcoViz: an iterative methodology for designing multifaceted data-driven environmental visualizations that communicate ecosystem impacts and envision nature-based solutions","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1018","abstract":"Household consumption significantly impacts climate change. Al- though interventions that make households aware of their consump- tion exist, tailoring the design to each home\u2019s needs remains chal- lenging. To address this, we developed Eco-Garden, a data sculp- ture designed to visualise household consumption aiming to pro- mote sustainable practices. Eco-Garden serves as both an aesthetic piece for visitors and a functional tool for household members to understand their resource consumption. In this paper, we present the human-centred design process of Eco-Garden and the prelim- inary findings we made through the field study. We conducted a field study with 15 households to explore participants\u2019 experience with Eco-Garden and its potential to encourage sustainable prac- tices at home. Our participants provided positive feedback on inte- grating Eco-Garden into their homes, highlighting considerations such as aesthetics, physicality, calm manner of presenting con- sumption data. 
Our Insights contribute to developing data sculp- tures for households that can facilitate meaningful interactions with consumption data.","accessible_pdf":false,"authors":[{"affiliations":["Cardiff University, UK, Cardiff, United Kingdom"],"email":"pereraud@cardiff.ac.uk","is_corresponding":true,"name":"Dushani Ushettige"},{"affiliations":["Cardiff University, Cardiff, United Kingdom"],"email":"verdezotodiasn@cardiff.ac.uk","is_corresponding":false,"name":"Nervo Verdezoto"},{"affiliations":["Cardiff University, Cardiff, United Kingdom"],"email":"lannon@cardiff.ac.uk","is_corresponding":false,"name":"Simon Lannon"},{"affiliations":["Cardiff Universiy, Cardiff, United Kingdom"],"email":"gwilliamja@cardiff.ac.uk","is_corresponding":false,"name":"Jullie Gwilliam"},{"affiliations":["Cardiff University, Cardiff, United Kingdom"],"email":"eslambolchilarp@cardiff.ac.uk","is_corresponding":false,"name":"Parisa Eslambolchilar"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-vis4climate-1018","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Eco-Garden: A Data Sculpture to Encourage Sustainable Practices in Everyday Life in Households","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1023","abstract":"Consumers have the potential to play a large role in mitigating the climate crisis by taking on more pro-environmental behavior, for example by making more sustainable food choices. However, while environmental awareness is common among consumers, it is not always clear what the current impact of one's own food choices are, and consequently it is not always clear how or why their own behavior must change, or how important the change is. Immersive technologies have been shown to aid in these aspects. In this paper, we bring food production into the home by means of handheld augmented reality. Using the current prototype, users can input which ingredients are in their meal on their smartphone, and after making a 3D scan of their kitchen, plants, livestock, feed, and water required for all are visualized in front of them. 
In this paper, we describe the design of the current prototype and, by analyzing the current state of research on virtual and augmented reality for sustainability research, we describe in which ways the application could be extended in terms of data, models, and interaction, to investigate the most prominent issues within environmental sustainability communications research.","accessible_pdf":true,"authors":[{"affiliations":["Wageningen University and Research, Wageningen, Netherlands"],"email":"nina.rosa-dejong@wur.nl","is_corresponding":true,"name":"Nina Rosa"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-vis4climate-1023","image_caption":"Screenshots of the handheld augmented reality AwARe prototype, showing an ingredients list for a simple meat-centered meal, and crops, water and livestock required for the meat-centered meal, visualized in a kitchen and dining room. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://nerosa.nl/wp-content/uploads/2024/08/Viz4CandS_AwARe___author_version_for_open_access.pdf","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"AwARe: Using handheld augmented reality for researching the potential of food resource information visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1024","abstract":"This paper details the development and implementation of a collaborative exhibit at Boston\u2019s Museum of Science showcasing interactive data visualizations designed to educate the public on global sustainability and urban environmental concerns. Supported by cross-institutional collaboration, the exhibit provided a rich real-world learning opportunity for students, resulting in a set of public-facing educational resources that informed visitors of global sustainability concerns through the lens of a local municipality. The realization of this project was made possible only by a close collaboration between a municipality, science museum and academic partners, all of whom committed their expertise and resources at both leadership and implementation team levels. This initiative highlights the value of cross-institutional collaboration to ignite the transformative potential of interactive visualizations in driving public engagement of local and global sustainability issues. 
Focusing on promoting sustainability and enhancing community well-being, this initiative highlights the potential of cross-institutional collaboration and locally-relevant interactive data visualizations to educate, inspire action, and foster community engagement in addressing climate change and urban sustainability.","accessible_pdf":false,"authors":[{"affiliations":["Brown University, Providence, United States","Rhode Island School of Design, Providence, United States"],"email":"bae@brown.edu","is_corresponding":true,"name":"Beth Altringer Eagle"},{"affiliations":["Harvard University, Cambridge, United States"],"email":"sylvan@media.mit.edu","is_corresponding":false,"name":"Elisabeth Sylvan"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-vis4climate-1024","image_caption":"Four examples of interactive data visualizations created by students at Harvard, Brown and RISD using open data from the city of Boston and presented at the Museum of Science","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-vis4climate/w-vis4climate-1024/w-vis4climate-1024_Preview.mp4?token=aQj_tjR4bHboHrvd75dSimXws-mWAlFpe0BQ2-hucto&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"qZBMOrWz8hI","session_youtube_ff_link":"https://youtu.be/qZBMOrWz8hI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Cultivating Climate Action Through Multi-Institutional Collaboration: Innovative Data Visualization Educational Programs and Exhibits for Public Engagement","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1010","abstract":"The urgency of climate change is now recognized globally. As humanity confronts the critical need to mitigate climate change and foster sustainability, data visualization emerges as a powerful tool with a unique capacity to communicate insights crucial for understanding environmental complexities. This paper explores the critical need for designing and investigating responsible data visualization that can act as a catalyst for engaging communities within global climate action and sustainability efforts. Grounded in prior work and reflecting on a decade of community engagement research, I propose five critical considerations: (1) inclusive and accessible visualizations for enhancing climate education and communication, (2) interactive visualizations for fostering agency and deepening engagement, (3) in-situ visualizations for reducing spatial indirection, (4) shared immersive experiences for catalyzing collective action, and (5) accurate, transparent, and credible visualizations for ensuring trust and integrity. 
These considerations offer strategies and new directions for visualization research, aiming to enhance community engagement, deepen involvement, and foster collective action on critical socio-technical issues, including and beyond climate change.","accessible_pdf":false,"authors":[{"affiliations":["University of Massachusetts Amherst, Amherst, United States"],"email":"narges.mahyar@gmail.com","is_corresponding":true,"name":"Narges Mahyar"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-vis4climate-1010","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Harnessing Visualization for Climate Action and Sustainable Future","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1020","abstract":"Satellite Earth Observation (EO) data is essential for tracking climate change trends and their impacts on ecosystems; however, conventional methods of presenting EO data often fail to effectively communicate the intricate relationships between climate causes and effects in hyperlocal contexts. To address this challenge, this paper investigates the use of advanced data visualization techniques, focusing on the potential of Augmented Reality (AR) and Virtual Reality (VR) to enhance EO data understanding and climate storytelling. Leveraging the MIT Media Lab's Earth Mission Control (EMC) AR/VR platform, the paper details how immersive VR environments can simplify complex climate data narratives, and enhance the ability of decision-makers to analyze, interact with, and understand EO data. The paper presents the architecture of EMC\u2019s platform, including key design features such as: information dashboard carousel; map table; globe; and dynamic scenic VR environments. 
User feedback from diverse stakeholders reveals significant improvements in climate communication and decision-making, emphasizing the capability of leveraging immersive technologies to address global climate challenges.","accessible_pdf":false,"authors":[{"affiliations":["MIT Media Lab, Cambridge, MA, United States"],"email":"minoo@media.mit.edu","is_corresponding":false,"name":"Minoo Rathnasabapathy"},{"affiliations":["MIT Media Lab, Cambridge, United States"],"email":"dnewman@mit.edu","is_corresponding":false,"name":"Dava Newman"},{"affiliations":["MIT Media Lab, Cambridge, United States"],"email":"rachelbc@media.mit.edu","is_corresponding":true,"name":"Rachel Connolly"},{"affiliations":["MIT Media Lab, Cambridge, United States"],"email":"pcherner@mit.edu","is_corresponding":false,"name":"Phillip Cherner"},{"affiliations":["MIT Media Lab, Cambridge, United States"],"email":"palmjad2@mit.edu","is_corresponding":false,"name":"Jaden Palmer"},{"affiliations":["NASA Goddard Space Flight Center, Greenbelt, United States"],"email":"mark.u.subbarao@nasa.gov","is_corresponding":false,"name":"Mark SubbaRao"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-vis4climate-1020","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Earth Mission Control: Advanced Data Visualizations for Climate Intelligence","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1029","abstract":"Artists have been speaking to, and creating paths for reflection on, fundamental threats to society and our lives as far back as we can document. Our changing climate is one such threat demanding meaningful narratives. In this short paper, we present the work of six internationally recognized artists addressing climate change, along with an analysis of their common work threads, toward the goal of promoting adoption of some of the \"tools\" in their toolkit. 
By doing so, we hope we can assist the visualization community in creating content that moves beyond intellectual understanding toward emotional adoption and thus action.","accessible_pdf":false,"authors":[{"affiliations":["University of Texas at Austin, Austin, United States"],"email":"fsamsel@tacc.utexas.edu","is_corresponding":true,"name":"Francesca Samsel"},{"affiliations":["Rhode Island School of Design, Providence, United States"],"email":"bcampbel01@risd.edu","is_corresponding":false,"name":"Bruce Donald Campbell"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-vis4climate-1029","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Artists, Data and Climate Change: Distilled messages, multiple entry points, layered metaphor","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1033","abstract":"Freshwater floods during hurricanes are known to cause significant damage to life and property. We could be better prepared to prevent these losses if flood forecasts can be made accurately and understood effectively. In addition to the technical complexities when modeling freshwater systems, forecasting freshwater floods also involves numerous uncertainties which need to be considered to make reliable data-driven decisions. 
In this demo, we describe the design and implementation of HydroVis\u2013a decision support system designed to help weather scientists triage flood forecasting models and to help policymakers understand the forecasts effectively and make informed decisions accordingly.","accessible_pdf":false,"authors":[{"affiliations":["University of Washington, Seattle, United States"],"email":"ameyap2@cs.washington.edu","is_corresponding":true,"name":"Ameya B Patil"},{"affiliations":["National Center for Atmospheric Research, Boulder, United States"],"email":"masmith@ucar.edu","is_corresponding":false,"name":"Marlee Smith"},{"affiliations":["National Center for Atmospheric Research, Boulder, United States"],"email":"hkershaw@ucar.edu","is_corresponding":false,"name":"Helen Kershaw"},{"affiliations":["National Center for Atmospheric Research, Boulder, United States"],"email":"gharamti@ucar.edu","is_corresponding":false,"name":"Moha El Gharamti"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-vis4climate-1033","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Interactive Visualization of Ensemble Data Assimilation Forecasts for Freshwater Floods","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1034","abstract":"While there is a well-known gap between what the general public and policymakers understand about science and what is known by experts, this gap is particularly perilous in regard to climate change. Currently, scientists inform each other via expert publications and conferences. We, as part of the public and policymakers, receive our information via the media and the web \u2013 and in our current catastrophic blending of information with misinformation, we are at risk of taking well-intentioned but ineffective or even harmful actions and decisions. To close this gap, a team of experts in data visualization, narrative construction, data comics, and climate change works collaboratively to develop climate change data comics that combine compelling narratives with comprehensible data visuals that are informed and verified by the appropriate scientists. 
This pictorial outlines our approach and provides two examples, emphasizing the integration of storytelling, scientific explanation, and data visualization through expressive visual presentations.","accessible_pdf":false,"authors":[{"affiliations":["Simon Fraser University, Vancouver, Canada"],"email":"wangzezhong2016@gmail.com","is_corresponding":true,"name":"Zezhong Wang"},{"affiliations":["Carleton University, Ottawa, Canada"],"email":"stephan.gruber@carleton.ca","is_corresponding":false,"name":"Stephan Gruber"},{"affiliations":["University of Manitoba, Winnipeg, Canada"],"email":"claire.herbert@umanitoba.ca","is_corresponding":false,"name":"Claire Herbert"},{"affiliations":["Simon Fraser University, Vancouver, Canada"],"email":"zandria_sarrazin@sfu.ca","is_corresponding":false,"name":"Zandria Sarrazin"},{"affiliations":["SFU, Burnaby, Canada"],"email":"mnl@sfu.ca","is_corresponding":false,"name":"Michelle Levy"},{"affiliations":["Simon Fraser University, Burnaby, Canada"],"email":"sheelagh@sfu.ca","is_corresponding":false,"name":"Sheelagh Carpendale"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-vis4climate-1034","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Data Comics for Climate Change","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1038","abstract":"The impacts of climate change are intensifying existing vulnerabilities and disparities within urban communities around the globe, as extreme weather events, including floods and heatwaves, are becoming more frequent and severe, disproportionately affecting low-income and underrepresented groups. Tackling these increasing challenges requires novel approaches that integrate expertise across multiple domains, including computer science, engineering, climate science, and public health. Urban computing can play a pivotal role in these efforts by integrating data from multiple sources to support decision-making and provide actionable insights into weather patterns, infrastructure weaknesses, and population vulnerabilities. However, the capacity to leverage technological advancements varies significantly between the Global South and Global North. In this paper, we present two multiyear, multidisciplinary projects situated in Chicago, USA and Niter\u00f3i, Brazil, highlighting the opportunities and limitations of urban computing in these diverse contexts. 
Reflecting on our experiences, we then discuss the essential requirements, as well as existing gaps, for visual analytics tools that facilitate the understanding and mitigation of climate-related risks in urban environments.","accessible_pdf":false,"authors":[{"affiliations":["University of Illinois, Chicago, United States"],"email":"carolvfs@illinois.edu","is_corresponding":false,"name":"Carolina Veiga"},{"affiliations":["University of Illinois, Chicago, United States"],"email":"sharmaa@illinois.edu","is_corresponding":false,"name":"Ashish Sharma"},{"affiliations":["Universidade Federal Fluminense, Niter\u00f3i, Brazil"],"email":"danielcmo@ic.uff.br","is_corresponding":false,"name":"Daniel de Oliveira"},{"affiliations":["Universidade Federal Fluminense , Niteroi, Brazil"],"email":"mlage@ic.uff.br","is_corresponding":false,"name":"Marcos Lage"},{"affiliations":["University of Illinois Chicago, Chicago, United States"],"email":"fabiom@uic.edu","is_corresponding":true,"name":"Fabio Miranda"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-vis4climate-1038","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Urban Computing for Climate And Environmental Justice: Early Perspectives From Two Research Initiatives","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1039","abstract":"This position statement discusses the challenges of designing visualizations to enhance the carbon numeracy of the general public. Carbon numeracy refers to an individual's quantitative awareness of their CO2 emissions, which can vary widely from grams to tons across different activities. Effective visualizations must accurately represent these ranges and facilitate quantitative comparisons. By leveraging insights from both visualization research and cognitive psychology on numerical perception and the representation of large numbers, we propose two novel design solutions to address these challenges. 
We aim to foster discussions on improving public carbon numeracy, ultimately aiding in mitigating climate change.","accessible_pdf":false,"authors":[{"affiliations":["Berger-Levrault, Boulogne-Billancourt, France","Inria, Saclay, France"],"email":"kbatziakoudi@gmail.com","is_corresponding":true,"name":"Katerina Batziakoudi"},{"affiliations":["Aviz, Inria, Saclay, France","LISN, Universit\u00e9 Paris-Saclay, CNRS, Orsay, France"],"email":"florent.cabric.pro@gmail.com","is_corresponding":false,"name":"Florent Cabric"},{"affiliations":["Berger-Levrault, Toulouse, France"],"email":"stephanie.rey@berger-levrault.com","is_corresponding":false,"name":"St\u00e9phanie Rey"},{"affiliations":["Inria, Saclay, France","Universit\u00e9 Paris-Saclay, CNRS, Orsay, France"],"email":"jean-daniel.fekete@inria.fr","is_corresponding":false,"name":"Jean-Daniel Fekete"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-vis4climate-1039","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Designing Visualizations for Enhancing Carbon Numeracy","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1040","abstract":"The Intergovernmental Panel on Climate Change (IPCC) plays a pivotal role in assessing and communicating climate science through its comprehensive reports. Despite the IPCC's efforts to provide source code and data for report figures, reproducing these figures is still challenging. This paper details our approach and the obstacles encountered in creating reproducible visualizations from the IPCC Working Group 1 data. Our work involved developing a set of front-end GitHub repositories that build upon the IPCC's original resources, incorporating reproducibility instructions and scripts to closely replicate the report\u2019s figures. 
By providing reproducible figures, we aim to enhance public engagement and contribution to climate change communication, ensuring accuracy and facilitating iterative improvements in figure presentation.","accessible_pdf":false,"authors":[{"affiliations":["Zhejiang University, Hangzhou, China","INRIA, Saclay, France"],"email":"yingluu@zju.edu.cn","is_corresponding":true,"name":"Lu Ying"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"ycwu@zju.edu.cn","is_corresponding":false,"name":"Yingcai Wu"},{"affiliations":["Inria, Saclay, France"],"email":"jean-daniel.fekete@inria.fr","is_corresponding":false,"name":"Jean-Daniel Fekete"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-vis4climate-1040","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":null,"session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Exploring the Reproducibility for Visualization Figures in Climate Change Report","youtube_ff_id":null,"youtube_ff_url":null}] +[{"UID":"w-visxai-1591","abstract":"Linear algebra and matrix computations are often presented in math class as an array of inane formulas and calculations to drill and memorize. This explorable explainer attempts to present a deeper and more visual intuition behind what matrices represent. It experiments with a different kind of medium to present concepts to the reader. Animations of visuals are tied to the reader\u2019s scroll, allowing fine-grained control over more complex transitions. The piece also concludes with an interactive sandbox that readers can fiddle around with to reinforce their understanding and to challenge their intuitions. Readers can adjust the values of the input matrix even in three dimensions, and observe its result on the linear transformation on different kinds of objects \u2013 such as points in space, vectors, and even images and 3D models. 
https://yizhe-ang.github.io/matrix-explorable/","accessible_pdf":false,"authors":[{"affiliations":["National University of Singapore, Singapore, Singapore"],"email":"ang.yizhe@u.nus.edu","is_corresponding":true,"name":"Yi Zhe Ang"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-1591","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"The Matrix Arcade: A Visual Explorable of Matrix Transformations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-2024","abstract":"Though deep learning models have achieved remarkable success in diverse domains (e.g., facial recognition, autonomous driving), these models have been proven to be quite brittle to perturbations around the input data. Adversarial machine learning (AML) studies attacks that can fool machine learning models into generating incorrect outcomes as well as the defenses against worst-case attacks to strengthen model robustness. Specifically, for image classification, it is challenging to understand adversarial attacks due to their use of subtle perturbations that are not human-interpretable, as well as the variability of attack impacts influenced by attack methods, instance differences, or model architectures. 
This guide will utilize interactive visualizations to provide a non-expert introduction to adversarial attacks, and visualize the impact of FGSM attacks on two different ResNet-34 models.","accessible_pdf":false,"authors":[{"affiliations":["University of Waterloo, Waterloo, Canada"],"email":"y28you@uwaterloo.ca","is_corresponding":true,"name":"Yuzhe You"},{"affiliations":["University of Waterloo, Waterloo, Canada"],"email":"jianzhao@uwaterloo.ca","is_corresponding":false,"name":"Jian Zhao"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-2024","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/UUkftG2KH5o&t=1h31m3s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/UUkftG2KH5o&t=1h31m3s","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"Panda or Gibbon? A Beginner's Introduction to Adversarial Attacks","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-2967","abstract":"Algorithmic rankers have proven to be very useful in many real-world socio-technical systems, as they assist greatly in making decisions (e.g., who to hire, who to admit). Our conversational interface, TalkToRanker, aims to empower non-expert information consumers to engage with algorithmic rankers via multi-modal conversations involving text and visualizations. We leverage explainable AI methods and the generative power of large language models (LLMs) for facilitating such conversations. 
We demonstrate the capabilities of TalkToRanker via interactive scenarios from the perspective of an admissions officer.","accessible_pdf":false,"authors":[{"affiliations":["New Jersey Institute of Technology, Newark, United States"],"email":"conor2fitzpatrick@gmail.com","is_corresponding":true,"name":"Conor Fitzpatrick"},{"affiliations":["New Jersey Institute of Technology, Newark, United States"],"email":"jy448@njit.edu","is_corresponding":false,"name":"Jun Yuan"},{"affiliations":["New Jersey Institute of Technology, Newark, United States"],"email":"dasgupta.aritra@gmail.com","is_corresponding":false,"name":"Aritra Dasgupta"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-2967","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/UUkftG2KH5o&t=0h55m17s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/UUkftG2KH5o&t=0h55m17s","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"TalkToRanker: A Conversational Interface for Ranking-based Decision-Making","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-3472","abstract":"Deciphering the regulatory logic of RNA splicing, a critical process in genome function, remains a major challenge in modern biology. While various machine learning models have been proposed to address this issue, many of them fall short in terms of interpretability, unable to articulate how they arrive at their predictions. We recently introduced an interpretable machine learning model that predicts splicing outcomes based on input sequence and structure. Here, we present a series of interactive data visualization tools to illuminate the process behind the network's predictions. Specifically, we introduce visualizations that emphasize both the global and local interpretability of our model. These visualizations emphasize the clear intermediate reasoning stages of our model that trace how specific RNA features contribute to the final splicing prediction. We highlight how these visualizations can be used to explain the network\u2019s performance on prior training and validation datasets. Finally, we explore how these interactive visualizations can be harnessed to facilitate domain-specific applications, such as rational design of RNA sequences with desired splicing outcomes. 
Together, these visualizations highlight the role of data visualization and interactivity in enhancing machine learning interpretability and model adoption.","accessible_pdf":false,"authors":[{"affiliations":["New York University, New York, United States"],"email":"msa8779@nyu.edu","is_corresponding":false,"name":"Mateus Silva Aragao"},{"affiliations":["New York University, New York, United States"],"email":"sz3991@nyu.edu","is_corresponding":true,"name":"Shiwen Zhu"},{"affiliations":["New York University, New York, United States"],"email":"nhi.nguyen@nyu.edu","is_corresponding":false,"name":"Nhi Nguyen"},{"affiliations":["University of Pennsylvania, Philadelphia, United States"],"email":"garciarjr.alejandro@gmail.com","is_corresponding":false,"name":"Alejandro Garcia"},{"affiliations":["New York University, New York, United States"],"email":"sl7927@nyu.edu","is_corresponding":false,"name":"Susan Elizabeth Liao"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-3472","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/UUkftG2KH5o&t=1h41m22s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/UUkftG2KH5o&t=1h41m22s","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"Inside an interpretable-by-design machine learning model: enabling RNA splicing rational design","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-3505","abstract":"Graph Neural Networks (GNNs) have gained huge success in a variety of applications, from modeling protein-protein interactions in biomedical graphs to identifying fraud in social networks. However, the complex structures of graphs and the complicated inner workings of graph neural networks make it hard for non-AI-experts to understand the essential concepts of GNNs. To address this, we present GNN 101, an educational visualization tool designed for interactive learning of GNNs. GNN 101 seamlessly integrates different levels of abstraction, including a model overview, layer operations, and detailed animations for matrix calculations, with smooth transitions between them. It offers both a node-link view and a matrix view, which complement each other. The node-link view supports an intuitive understanding of the graph structure, while the matrix view provides a space-efficient and comprehensive overview of all features and their changes across layers. GNN 101 not only reveals the computation of GNN in an engaging and intuitive way but also effectively demonstrates how node features update layer by layer through learning from their neighbors. 
It runs locally in web browsers using ONNX Runtime without additional installations or setups.","accessible_pdf":false,"authors":[{"affiliations":["University of Minnesota, Twin Cities, Minneapolis , United States"],"email":"lu000661@umn.edu","is_corresponding":false,"name":"Yilin Lu"},{"affiliations":["University of Minnesota, minneapolis, United States"],"email":"chen8596@umn.edu","is_corresponding":false,"name":"Chongwei Chen"},{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"mattx0601@gmail.com","is_corresponding":false,"name":"Matthew Xu"},{"affiliations":["University of Minnesota, Minneapolis , United States"],"email":"qianwen@umn.edu","is_corresponding":true,"name":"Qianwen Wang"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-3505","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/UUkftG2KH5o&t=1h35m47s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/UUkftG2KH5o&t=1h35m47s","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"What Can a Node Learn from Its Neighbors in Graph Neural Networks?","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-3795","abstract":"The goal of this post is to build intuition around localizing information, something we naturally do to make sense of the world, and show how it can be formulated with machine learning as a route to interpretability. The long and short is that we can view the information in data as composed of specific distinctions worth making, in that these distinctions tell us the most about some other quantity we care about.","accessible_pdf":false,"authors":[{"affiliations":["University of Pennsylvania, Philadelphia, United States"],"email":"kieranm@seas.upenn.edu","is_corresponding":true,"name":"Kieran Murphy"},{"affiliations":["University of Pennsylvania, Philadelphia, United States"],"email":"dsb@seas.upenn.edu","is_corresponding":false,"name":"Dani S. 
Bassett"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-3795","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/UUkftG2KH5o&t=1h9m32s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/UUkftG2KH5o&t=1h9m32s","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"Where is the information in data?","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-4284","abstract":"In this article, we present several key concepts about empirical neural network robustness, including PGD attack, adversarial training, and accuracy-robustness tradeoff, with interactive visualizations.","accessible_pdf":false,"authors":[{"affiliations":["University of Maryland, College Park, United States"],"email":"cchen24@umd.edu","is_corresponding":true,"name":"Chen Chen"},{"affiliations":["Arizona state university, Tempe, United States"],"email":"jhuan196@asu.edu","is_corresponding":false,"name":"Jinbin Huang"},{"affiliations":["University of Maryland, College Park, United States"],"email":"eremsber@terpmail.umd.edu","is_corresponding":false,"name":"Ethan M Remsberg"},{"affiliations":["University of Maryland, College Park, United States"],"email":"leozcliu@umd.edu","is_corresponding":false,"name":"Zhicheng Liu"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-4284","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/UUkftG2KH5o&t=1h26m6s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/UUkftG2KH5o&t=1h26m6s","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"A Visual Tour to Empirical Neural Network Robustness","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-4395","abstract":"Large Language Models (LLMs) have revolutionized machine learning and natural language processing, demonstrating remarkable versatility across various tasks. 
Despite their advancements, their application in critical fields is hindered by a lack of effective interpretability and explainability. In our company, we have fine-tuned a text-to-command conversational AI model that translates natural language inputs into executable network commands. This paper presents our findings on explaining the model\u2019s reasoning processes, aiming to enhance understanding, identify biases, and improve performance. We explore techniques such as token attributions, hidden state visualizations, neuron activation, and attention mechanisms to elucidate model behavior. Our work contributes to the development of more interpretable and trustworthy AI systems, pushing the boundaries of conversational AI.","accessible_pdf":false,"authors":[{"affiliations":["Cisco Systems , Rolle, Switzerland"],"email":"p.stupar@outlook.com","is_corresponding":true,"name":"Petar Stupar"},{"affiliations":["HES-SO, Sion, Switzerland"],"email":"gregory.mermoud@proton.me","is_corresponding":false,"name":"Gregory Mermoud"},{"affiliations":["Cisco Systems, Pairs, France"],"email":"jpvasseur22@gmail.com","is_corresponding":false,"name":"Jean-Philippe Vasseur"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-4395","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/UUkftG2KH5o&t=0h47m29s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/UUkftG2KH5o&t=0h47m29s","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"Explaining Text-to-Command Conversational Models","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-5402","abstract":"","accessible_pdf":false,"authors":[{"affiliations":["Google Research, Cambridge, United States"],"email":"nadahussein@google.com","is_corresponding":true,"name":"Nada Hussein"},{"affiliations":["Google Research, New York, United States"],"email":"asma_gh@mit.edu","is_corresponding":false,"name":"Asma Ghandeharioun"},{"affiliations":["Google Research, Cambridge, United States"],"email":"ryanmullins@google.com","is_corresponding":false,"name":"Ryan Mullins"},{"affiliations":["Google, Cambridge, United States"],"email":"ereif@google.com","is_corresponding":false,"name":"Emily Reif"},{"affiliations":["Google Research, Mountain View, United States"],"email":"jimbo@google.com","is_corresponding":false,"name":"Jimbo Wilson"},{"affiliations":["Google, Montreal, Canada"],"email":"nthain@google.com","is_corresponding":false,"name":"Nithum Thain"},{"affiliations":["Google, Paris, France"],"email":"ldixon@google.com","is_corresponding":false,"name":"Lucas Dixon"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI 
Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-5402","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/UUkftG2KH5o&t=1h16m13s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/UUkftG2KH5o&t=1h16m13s","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"Can Large Language Models Explain Their Internal Mechanisms?","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-6211","abstract":"Do you want to understand exactly how AlphaFold3 works? The architecture is quite complicated and the description in the paper can be overwhelming, so we made a much more accessible (but just as detailed!) visual walkthrough. There are already many great explanations of the motivation for protein structure prediction, the CASP competition, model failure modes, debates about evaluations, implications for biotech, etc. so we don\u2019t focus on any of that. Instead we explore the how. How are these molecules represented in the model and what are all of the operations that convert their sequences into a predicted structure? 
As we walk through every step of this process, we explain 30 algorithms in ~40 clear diagrams, then share some thoughts on how they fit into the broader landscape of ML trends.","accessible_pdf":false,"authors":[{"affiliations":["Stanford University, Palo Alto, United States"],"email":"elanasimon95@gmail.com","is_corresponding":false,"name":"Elana P Simon"},{"affiliations":["Stanford, Stanford, United States"],"email":"jsilberg@stanford.edu","is_corresponding":false,"name":"Jake Silberg"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-6211","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/UUkftG2KH5o&t=1h20m14s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/UUkftG2KH5o&t=1h20m14s","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"The Illustrated AlphaFold","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-6324","abstract":"Prompt engineering is an emerging field where researchers are discovering new patterns of communication between humans and large language models. Powerful new abstractions like few-shot examples, tool use and reflection give prompt engineers the ability to create increasingly complex tasks for language models to solve while also opening up opportunities to visualize large prompts more succinctly. 
ExplainPrompt is an AI visualization project which is mapping out this new language of prompts and distilling them down into a clear and simple visualization style for prompt engineering.","accessible_pdf":false,"authors":[{"affiliations":["GitHub, San Francisco, United States"],"email":"narphorium@gmail.com","is_corresponding":true,"name":"Shawn Simister"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-6324","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"ExplainPrompt: Decoding the language of AI prompts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-visxai-9042","abstract":"Transformers, initially designed for Natural Language Processing, have emerged as a strong alternative to Convolutional Neural Networks in Computer Vision. However, their interpretability remains challenging. We overcome the limitations of earlier studies by offering interactive components, engaging the user in the exploration of the Vision Transformer (ViT). Furthermore, we offer various complementary explainability methods to challenge the insight they provide. Key contributions include: - Interactive analysis of the ViT architecture and explainability methods. - Identifying critical information from input images used for classification. - Investigating neuron activations at various depths to understand learned features. - Introducing an innovative adaptation of activation maximization for attention scores to trace attention head focus across network layers. - Highlighting the limitations of each method through occlusion-based interaction. Our findings include that ViTs tend to generalize well by relying on a broad set of object features and contexts seen in the input image. Furthermore, the focus of neurons and attention heads shifts to more complex patterns at deeper layers. We also acknowledge that we cannot rely on a single explainability method to understand the decision-making process of transformers. 

Our blog post provides an engaging and multi-facetted interpretation of the ViT to the readers by combining interactivity with key research questions.","accessible_pdf":false,"authors":[{"affiliations":["ETH Zurich, Z\u00fcrich, Switzerland"],"email":"anmarx@student.ethz.ch","is_corresponding":false,"name":"Anne Marx"},{"affiliations":["Eth Zurich , Z\u00fcrich, Switzerland"],"email":"yumikimi381@gmail.com","is_corresponding":false,"name":"Yumi Kim"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"luca.sichi@hotmail.com","is_corresponding":false,"name":"Luca Sichi"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"diego.arapovic@gmail.com","is_corresponding":false,"name":"Diego Arapovic"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland","ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"jsanguino@student.ethz.ch","is_corresponding":false,"name":"Javier Sanguino Bautiste"},{"affiliations":["ETH, Zurich, Switzerland","ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"rita.sevastjanova@inf.ethz.ch","is_corresponding":false,"name":"Rita Sevastjanova"},{"affiliations":["ETH Zurich, Zurich, Switzerland","ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"melassady@ai.ethz.ch","is_corresponding":false,"name":"Mennatallah El-Assady"}],"award":"","doi":"","event_id":"w-visxai","event_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-visxai-9042","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/UUkftG2KH5o&t=1h4m4s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated2","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VISxAI: 7th Workshop on Visualization for AI Explainability","session_uid":"w-visxai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/UUkftG2KH5o&t=1h4m4s","sessions":["VISxAI: 7th Workshop on Visualization for AI Explainability"],"time_stamp":"2024-10-13T12:30:00Z","title":"Explainability Perspectives on a Vision Transformer: From Global Architecture to Single Neuron","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-topoinvis-1027","abstract":"Advances in high-performance computing require new ways to represent large-scale scientific data to support data storage, data transfers, and data analysis within scientific workflows. Multivariate functional approximation (MFA) has recently emerged as a new continuous meshless representation that approximates raw discrete data with a set of piecewise smooth functions. An MFA model of data thus offers a compact representation and supports high-order evaluation of values and derivatives anywhere in the domain. In this paper, we present CPE-MFA, the first critical point extraction framework designed for MFA models of large-scale, high-dimensional data. CPE-MFA extracts critical points directly from an MFA model without the need for discretization or resampling. 
This is the first step toward enabling continuous implicit models such as MFA to support topological data analysis at scale.","accessible_pdf":true,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"guanqunma94@gmail.com","is_corresponding":true,"name":"Guanqun Ma"},{"affiliations":["Argonne National Laboratory, Lemont, United States"],"email":"dlenz@anl.gov","is_corresponding":false,"name":"David Lenz"},{"affiliations":["Argonne National Laboratory, Lemont, United States"],"email":"tpeterka@mcs.anl.gov","is_corresponding":false,"name":"Tom Peterka"},{"affiliations":["The Ohio State University, Columbus, United States"],"email":"guo.2154@osu.edu","is_corresponding":false,"name":"Hanqi Guo"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"wang.bei@gmail.com","is_corresponding":false,"name":"Bei Wang"}],"award":"","doi":"","event_id":"w-topoinvis","event_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-topoinvis-1027","image_caption":"Critical points identified by CPE-MFA and TTK-MFA. CPE-MFA: our method in a continuous domain. TTK-MFA: a discrete approach implemented in the topology tool kit. Yellow means the perfect alignment between CPE-MFA and TTK-MFA. Purple represents the critical points from TTK-MFA. Pink represents the critical points from CPE-MFA.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2408.13193","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-topoinvis/w-topoinvis-1027/w-topoinvis-1027_Preview.mp4?token=qfN8QlF5-hfBnFx-u_zUaqlRj9AvuCMVLamZhK9eq_0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-topoinvis/w-topoinvis-1027/w-topoinvis-1027_Preview.srt?token=lA4MEqtRmHXqAZxPCngtyJcdlU8tEXCX1OxWyYFcCHM&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated3","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","session_uid":"w-topoinvis","session_youtube_ff_id":"-J4QrJ3FOSA","session_youtube_ff_link":"https://youtu.be/-J4QrJ3FOSA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["TopoInVis: Workshop on Topological Data Analysis and Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Critical Point Extraction from Multivariate Functional Approximation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-topoinvis-1031","abstract":"3D symmetric tensor fields have a wide range of applications in science and engineering. The topology of such fields can provide critical insight into not only the structures in tensor fields but also their respective applications. Existing research focuses on the extraction of topological features such as degenerate curves and neutral surfaces. In this paper, we investigate the asymptotic behaviors of these topological features in the sphere of infinity. 
Our research leads to both theoretical analysis and observations that can aid further classifications of tensor field topology.","accessible_pdf":false,"authors":[{"affiliations":["Oregon State University, Corvallis, United States"],"email":"linxinw@oregonstate.edu","is_corresponding":false,"name":"Xinwei Lin"},{"affiliations":["Oregon State University, Corvallis, United States"],"email":"zhangyue@oregonstate.edu","is_corresponding":true,"name":"Yue Zhang"},{"affiliations":["Oregon State University, Corvallis, United States"],"email":"zhange@eecs.oregonstate.edu","is_corresponding":false,"name":"Eugene Zhang"}],"award":"","doi":"","event_id":"w-topoinvis","event_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-topoinvis-1031","image_caption":"The asymptotic behaviors of a 3D linear tensor field can be understood by the tensor mode function on the sphere of infinity. In this figure, we show the four topologically different cases: (a) two degenerate curves and the neutral surface with one boundary, (b) two degenerate curves and the neutral surface with three boundaries, (c) four degenerate curves and the neutral surface with one boundary, and (d) four degenerate curves and the neutral surface with three boundaries. In each of these cases, the degenerate curves intersect the sphere of infinity at the global maxima (yellow dots) and global minima (green dots) of the tensor mode function. Similarly, the neutral surface intersects the sphere of infinity at precisely the zeroth level set of the mode function. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated3","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","session_uid":"w-topoinvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["TopoInVis: Workshop on Topological Data Analysis and Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Asymptotic Topology of 3D Linear Symmetric Tensor Fields","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-topoinvis-1033","abstract":"Jacobi sets are an important method to investigate the relationship between Morse functions. The Jacobi set for two Morse functions is the set of all points where the functions' gradients are linearly dependent. Both the segmentation of the domain by Jacobi sets and the Jacobi sets themselves have proven to be useful tools in multi-field visualization, data analysis in various applications, and for accelerating extraction algorithms. On a triangulated grid, they can be calculated by a piecewise linear interpolation. In practice, Jacobi sets can become very complex and large due to noise and numerical errors. Some techniques for simplifying Jacobi sets exist, but these only reduce individual elements such as noise or are purely theoretical. These techniques often only change the visual representation of the Jacobi sets, but not the underlying data. 

In this paper, we present an algorithm that simplifies the Jacobi sets for 2D bivariate scalar fields and at the same time modifies the underlying bivariate scalar fields while preserving the essential structures of the fields. We use a neighborhood graph to select the areas to be reduced and collapse these cells individually. We investigate the influence of different neighborhood graphs and present an adaptation for the visualization of Jacobi sets that take the collapsed cells into account. We apply our algorithm to a range of analytical and real-world data sets and compare it with established methods that also simplify the underlying bivariate scalar fields.","accessible_pdf":false,"authors":[{"affiliations":["Leipzig University, Leipzig, Germany"],"email":"raith@informatik.uni-leipzig.de","is_corresponding":true,"name":"Felix Raith"},{"affiliations":["Leipzig University, Leipzig, Germany"],"email":"scheuermann@informatik.uni-leipzig.de","is_corresponding":false,"name":"Gerik Scheuermann"},{"affiliations":["Leipzig University, Leipzig, Germany"],"email":"heine@informatik.uni-leipzig.de","is_corresponding":false,"name":"Christian Heine"}],"award":"","doi":"","event_id":"w-topoinvis","event_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-topoinvis-1033","image_caption":"Comparison of the calculated Jacobi sets in the Cylinder Flow dataset on the left side of the figure for the original dataset in the upper figure before simplification and the dataset in the lower figure after simplification with the collapse algorithm with threshold t = 0.0001. Furthermore, the corresponding neighborhood graphs are displayed on the right side. In this figure, the color corresponds to the orientation, red, positive orientation (det \u2207f(x) > 0), and blue, negative orientation (det \u2207f(x) < 0). The saturation indicates the range area. High saturation means a large range area, and vice versa for low saturation. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2408.08097","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-topoinvis/w-topoinvis-1033/w-topoinvis-1033_Preview.mp4?token=pfMUjDzu2of7xxTP2BY9WDnTHYf8QGffCYIbJZ3DkIY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-topoinvis/w-topoinvis-1033/w-topoinvis-1033_Preview.srt?token=RVqJGkHIIu7_Z84jZuTwdgvtrIlkMrEVatymxFiTaU0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated3","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","session_uid":"w-topoinvis","session_youtube_ff_id":"4KyGneBGdlY","session_youtube_ff_link":"https://youtu.be/4KyGneBGdlY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["TopoInVis: Workshop on Topological Data Analysis and Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Topological Simplifcation of Jacobi Sets for Piecewise-Linear Bivariate 2D Scalar Fields","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-topoinvis-1034","abstract":"The Morse-Smale complex is a standard tool in visual data analysis. 
The classic definition is based on a continuous view of the gradient of a scalar function where its zeros are the critical points. These points are connected via gradient curves and surfaces emanating from saddle points, known as separatrices. In a discrete setting, the Morse-Smale complex is commonly extracted by constructing a combinatorial gradient assuming the steepest descent direction. Previous works have shown that this method results in a geometric embedding of the separatrices that can be fundamentally different from those in the continuous case. To achieve a similar embedding, different approaches for constructing a combinatorial gradient were proposed. In this paper, we show that these approaches generate a different topology, i.e., the connectivity between critical points changes. Additionally, we demonstrate that the steepest descent method can compute topologically and geometrically accurate Morse-Smale complexes when applied to certain types of grids. Based on these observations, we suggest a method to attain both geometric and topological accuracy for the Morse-Smale complex of data sampled on a uniform grid.","accessible_pdf":false,"authors":[{"affiliations":["KTH Royal Institute of Technology, Stockholm, Sweden"],"email":"sonlt@kth.se","is_corresponding":true,"name":"Son Le Thanh"},{"affiliations":["KTH Royal Institute of Technology, Stockholm, Sweden"],"email":"ankele@iai.uni-bonn.de","is_corresponding":false,"name":"Michael Ankele"},{"affiliations":["KTH Royal Institute of Technology, Stockholm, Sweden"],"email":"weinkauf@kth.se","is_corresponding":false,"name":"Tino Weinkauf"}],"award":"","doi":"","event_id":"w-topoinvis","event_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-topoinvis-1034","image_caption":"Shown is the Morse-Smale complex of an analytic function representing a circle engraved in a tilted plane. It can be computed using the provably correct steepest descent method as shown by the orange lines. This method struggles to produce a geometric embedding similar to that of continuous topology, i.e. the circular shape. Although several approaches have been proposed to address this issue, in this paper, we show systematically that they generate different topologies. 
We show that geometrical and topological accuracy can be achieved by applying the steepest descent method on a modified grid structure, illustrated by the white lines.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2409.05532","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated3","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","session_uid":"w-topoinvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["TopoInVis: Workshop on Topological Data Analysis and Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Revisiting Accurate Geometry for the Morse-Smale Complexes","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-topoinvis-1038","abstract":"This paper presents a nested tracking framework for analyzing cycles in 2D force networks within granular materials. These materials are composed of interacting particles, whose interactions are described by a force network. Understanding the cycles within these networks at various scales and their evolution under external loads is crucial, as they significantly contribute to the mechanical and kinematic properties of the system. Our approach involves computing a cycle hierarchy by partitioning the 2D domain into regions bounded by cycles in the force network. We can adapt concepts from nested tracking graphs originally developed for merge trees by leveraging the duality between this partitioning and the cycles. We demonstrate the effectiveness of our method on two force networks derived from experiments with photo-elastic disks.","accessible_pdf":true,"authors":[{"affiliations":["Link\u00f6ping University, Link\u00f6ping, Sweden"],"email":"farhan.rasheed@liu.se","is_corresponding":true,"name":"Farhan Rasheed"},{"affiliations":["Indian Institute of Science, Bangalore, India"],"email":"abrarnaseer@iisc.ac.in","is_corresponding":false,"name":"Abrar Naseer"},{"affiliations":["Link\u00f6ping university, Norrk\u00f6ping, Sweden"],"email":"emma.nilsson@liu.se","is_corresponding":false,"name":"Emma Nilsson"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"talha.bin.masood@liu.se","is_corresponding":false,"name":"Talha Bin Masood"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"ingrid.hotz@liu.se","is_corresponding":false,"name":"Ingrid Hotz"}],"award":"","doi":"","event_id":"w-topoinvis","event_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-topoinvis-1038","image_caption":"A tracking graph illustrating the development of cycles in a dynamic planar graph. Each column corresponds to a specific time point, with the nodes in each column corresponding to a region enclosed by a cycle in the partitioning of the underlying domain (shown at bottom). 

The color highlights the local development of the spatial system.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2409.06476","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-topoinvis/w-topoinvis-1038/w-topoinvis-1038_Preview.mp4?token=ojyOF4m7qIFNwHfc3R2ZTN-NE8U_X12IDfVrSV0g9QE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-topoinvis/w-topoinvis-1038/w-topoinvis-1038_Preview.srt?token=IBiN8r08yS_XEtLFwQrAHLdQeEhgWj19vwFJvCtPvZo&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated3","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","session_uid":"w-topoinvis","session_youtube_ff_id":"DhRmBk_dTns","session_youtube_ff_link":"https://youtu.be/DhRmBk_dTns","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["TopoInVis: Workshop on Topological Data Analysis and Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Multi-scale Cycle Tracking in Dynamic Planar Graphs","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-topoinvis-1041","abstract":"Tetrahedral meshes are widely used due to their flexibility and adaptability in representing changes of complex geometries and topology. However, most existing data structures struggle to efficiently encode the irregular connectivity of tetrahedral meshes with billions of vertices. We address this problem by proposing a novel framework for efficient and scalable analysis of large tetrahedral meshes using Apache Spark. The proposed framework, called Tetra-Spark, features optimized approaches to locally compute many connectivity relations by first retrieving the Vertex-Tetrahedron (VT) relation. This strategy significantly improves Tetra-Spark's efficiency in performing morphology computations on large tetrahedral meshes. To prove the effectiveness and scalability of such a framework, we conduct a comprehensive comparison against a vanilla Spark implementation for the analysis of tetrahedral meshes. Our experimental evaluation shows that Tetra-Spark achieves up to a 78x speedup and reduces memory usage by up to 80% when retrieving connectivity relations with the VT relation available. 

This optimized design further accelerates subsequent morphology computations, resulting in up to a 47.7x speedup.","accessible_pdf":true,"authors":[{"affiliations":["University of Maryland, College Park, College Park, United States"],"email":"yhqian@umd.edu","is_corresponding":true,"name":"Yuehui Qian"},{"affiliations":["Clemson University, Clemson, United States"],"email":"guoxil@clemson.edu","is_corresponding":false,"name":"Guoxi Liu"},{"affiliations":["Clemson University, Clemson, United States"],"email":"fiurici@clemson.edu","is_corresponding":false,"name":"Federico Iuricich"},{"affiliations":["University of Maryland, College Park, United States"],"email":"deflo@umiacs.umd.edu","is_corresponding":false,"name":"Leila De Floriani"}],"award":"","doi":"","event_id":"w-topoinvis","event_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-topoinvis-1041","image_caption":"Figure: (a) The time cost (in minutes) for extracting connectivity relations and executing the algorithm in computing Forman gradient. (b) The peak memory consumption (in GB) for extracting relations. (c) The peak memory usage (in GB) for the entire computation.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated3","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"TopoInVis: Workshop on Topological Data Analysis and Visualization","session_uid":"w-topoinvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["TopoInVis: Workshop on Topological Data Analysis and Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Efficient representation and analysis for a large tetrahedral mesh using Apache Spark","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-ldav-1002","abstract":"Cuneiform is the earliest known system of writing, first developed for the Sumerian language of southern Mesopotamia in the second half of the 4th millennium BC. Cuneiform signs are obtained by impressing a stylus on fresh clay tablets. For certain purposes, e.g. authentication by seal imprint, some cuneiform tablets were enclosed in clay envelopes, which cannot be opened without destroying them. The aim of our interdisciplinary project is the non-invasive study of clay tablets. A portable X-ray micro-CT scanner is developed to acquire density data of such artifacts on a high-resolution, regular 3D grid at collection sites. The resulting volume data is processed through feature-preserving denoising, extraction of high-accuracy surfaces using a manifold dual marching cubes algorithm and extraction of local features by enhanced curvature rendering and ambient occlusion. For the non-invasive study of cuneiform inscriptions, the tablet is virtually separated from its envelope by curvature-based segmentation. The computational- and data-intensive algorithms are optimized for near-real-time offline usage with limited resources at collection sites. 
To visualize the complexity-reduced and octree-based compressed representation of surfaces, we develop and implement an interactive application. To facilitate the analysis of such clay tablets, we implement shape-based feature extraction algorithms to enhance cuneiform recognition. Our workflow supports innovative 3D display and interaction techniques such as autostereoscopic displays and gesture control.","accessible_pdf":true,"authors":[{"affiliations":["Universit\u00e4t Hamburg, Hamburg, Germany"],"email":"stephan.olbrich@uni-hamburg.de","is_corresponding":false,"name":"Stephan Olbrich"},{"affiliations":["Universit\u00e4t Hamburg, Hamburg, Germany"],"email":"andreas.beckert@uni-hamburg.de","is_corresponding":true,"name":"Andreas Beckert"},{"affiliations":["Centre National de la Recherche Scientifique (CNRS), Nanterre, France"],"email":"cecile.michel@cnrs.fr","is_corresponding":false,"name":"C\u00e9cile Michel"},{"affiliations":["Deutsches Elektronen-Synchrotron (DESY), Hamburg, Germany","Universit\u00e4t Hamburg, Hamburg, Germany"],"email":"christian.schroer@desy.de","is_corresponding":false,"name":"Christian Schroer"},{"affiliations":["Deutsches Elektronen-Synchrotron (DESY), Hamburg, Germany","Universit\u00e4t Hamburg, Hamburg, Germany"],"email":"samaneh.ehteram@desy.de","is_corresponding":false,"name":"Samaneh Ehteram"},{"affiliations":["Deutsches Elektronen-Synchrotron (DESY), Hamburg, Germany"],"email":"andreas.schropp@desy.de","is_corresponding":false,"name":"Andreas Schropp"},{"affiliations":["Deutsches Elektronen-Synchrotron (DESY), Hamburg, Germany"],"email":"philipp.paetzold@desy.de","is_corresponding":false,"name":"Philipp Paetzold"}],"award":"","doi":"","event_id":"a-ldav","event_title":"LDAV: 13th IEEE Symposium on Large Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"a-ldav-1002","image_caption":"Virtual unpacking of an ancient clay tablet enclosed in another layer of clay. The surfaces are reconstructed from computed tomography data, which are acquired using a specially designed instrument developed for this purpose. The rendering of the reconstructed surfaces is refined with features such as enhanced curvature and ambient occlusion.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"http://arxiv.org/abs/2409.04236","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated4","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization","session_uid":"a-ldav","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization"],"time_stamp":"2024-10-13T16:00:00Z","title":"Efficient Analysis and Visualization of High-Resolution Computed Tomography Data for the Exploration of Enclosed Cuneiform Tablets","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-ldav-1003","abstract":"Dimensionality reduction (DR) is a well-established approach for the visualization of high-dimensional data sets. 
While DR methods are often applied to typical DR benchmark data sets in the literature, they might suffer from high runtime complexity and memory requirements, making them unsuitable for large data visualization, especially in environments outside of high-performance computing. To perform DR on large data sets, we propose the use of out-of-sample extensions. Such extensions allow inserting new data into existing projections, which we leverage to iteratively project data into a reference projection that consists only of a small manageable subset. This process makes it possible to perform DR out-of-core on large data, which would otherwise not be possible due to memory and runtime limitations. For metric multidimensional scaling (MDS), we contribute an implementation with out-of-sample projection capability since typical software libraries do not support it. We provide an evaluation of the projection quality of five common DR algorithms (MDS, PCA, t-SNE, UMAP, and autoencoders) using quality metrics from the literature and analyze the trade-off between the size of the reference set and projection quality. The runtime behavior of the algorithms is also quantified with respect to reference set size, out-of-sample batch size, and dimensionality of the data sets. Furthermore, we compare the out-of-sample approach to other recently introduced DR methods, such as PaCMAP and TriMAP, which claim to handle larger data sets than traditional approaches. To showcase the usefulness of DR on this large scale, we contribute a use case where we analyze ensembles of streamlines amounting to one billion projected instances.","accessible_pdf":false,"authors":[{"affiliations":["Universit\u00e4t Stuttgart, Stuttgart, Germany"],"email":"lucareichmann01@gmail.com","is_corresponding":false,"name":"Luca Marcel Reichmann"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"david.haegele@visus.uni-stuttgart.de","is_corresponding":false,"name":"David H\u00e4gele"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"weiskopf@visus.uni-stuttgart.de","is_corresponding":false,"name":"Daniel Weiskopf"}],"award":"","doi":"","event_id":"a-ldav","event_title":"LDAV: 13th IEEE Symposium on Large Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"a-ldav-1003","image_caption":"The projections show the results of dimensionality reduction using the out-of-sample approach with data sets containing up to 50 million data points. In each column, the sizes of the reference set are increased. The size used for creating the initial reference projection is shown by the number above each plot. We show the results for popular dimensionality reduction techniques: MDS, PCA, t-SNE, UMAP, and autoencoder. 

The projections are evaluated using various quality metrics.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"https://arxiv.org/abs/2408.04129v1","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/a-ldav/a-ldav-1003/a-ldav-1003_Preview.mp4?token=PYJw_loXL4L1qAH_hlEUHMBLIR9img4Wuf2hw1VfGqw&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/a-ldav/a-ldav-1003/a-ldav-1003_Preview.srt?token=wztCbEzC5jXSgn8_YRdZ6ycRd17DxhBRfIfdjynStm0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated4","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization","session_uid":"a-ldav","session_youtube_ff_id":"Xm6e1eW5DxA","session_youtube_ff_link":"https://youtu.be/Xm6e1eW5DxA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization"],"time_stamp":"2024-10-13T16:00:00Z","title":"Out-of-Core Dimensionality Reduction for Large Data via Out-of-Sample Extensions","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-ldav-1006","abstract":"Scientists generate petabytes of data daily to help uncover environmental trends or behaviors that are hard to predict. For example, understanding climate simulations based on the long-term average of temperature, precipitation, and other environmental variables is essential to predicting and establishing root causes of future undesirable scenarios and assessing possible mitigation strategies. Unfortunately, bottlenecks in petascale workflows restrict scientists' ability to analyze and visualize the necessary information due to requirements for extensive computational resources, obstacles in data accessibility, and inefficient analysis algorithms. This paper presents an approach to managing, visualizing, and analyzing petabytes of data within a browser on equipment ranging from the top NASA supercomputer to commodity hardware like a laptop. Our approach is based on a novel data fabric abstraction layer that allows querying scientific information in a form that is user-friendly while hiding the complexities of dealing with file systems or cloud services. We also optimize network utilization while streaming from petascale repositories through state-of-the-art progressive compression algorithms. Based on this abstraction, we provide customizable dashboards that can be accessed from any device with an internet connection, offering straightforward access to vast amounts of data typically not available to those without access to uniquely expensive hardware resources. Our dashboards provide and improve the ability to access and, more importantly, use massive data for a wide range of users, from top scientists with access to leadership-class computing environments to undergraduate students of disadvantaged backgrounds from minority-serving institutions. We focus on NASA's use of petascale climate datasets as an example of particular societal impact and, therefore, a case where achieving equity in science participation is critical. 
In particular, we validate our approach by improving the ability of climate scientists to explore their data even on the top NASA supercomputer, introducing the ability to study their data in a fully interactive environment instead of being limited to using pre-choreographed videos that can each take days to generate. We also successfully introduced the same dashboards and simplified training material in an undergraduate class on Geospatial Analysis at a minority-serving campus (Utah State Blanding), where 69% of the students are Native American and 86% are low-income. The same dashboards are also released in simplified form to the general public, providing an unparalleled democratization for the access and use of climate data that can be extended to most scientific domains.","accessible_pdf":true,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"aashishpanta0@gmail.com","is_corresponding":true,"name":"Aashish Panta"},{"affiliations":["Scientific Computing and Imaging Institute, Salt Lake City, United States"],"email":"xuanhuang@sci.utah.edu","is_corresponding":false,"name":"Xuan Huang"},{"affiliations":["NASA Ames Research Center, Mountain View, United States"],"email":"nina.mccurdy@gmail.com","is_corresponding":false,"name":"Nina McCurdy"},{"affiliations":["NASA, mountain View, United States"],"email":"david.ellsworth@nasa.gov","is_corresponding":false,"name":"David Ellsworth"},{"affiliations":["university of Utah, Salt lake city, United States"],"email":"amy.a.gooch@gmail.com","is_corresponding":false,"name":"Amy Gooch"},{"affiliations":["university of Utah, Salt lake city, United States"],"email":"scrgiorgio@gmail.com","is_corresponding":false,"name":"Giorgio Scorzelli"},{"affiliations":["NASA, Pasadena, United States"],"email":"hector.torres.gutierrez@jpl.nasa.gov","is_corresponding":false,"name":"Hector Torres"},{"affiliations":["caltech, Pasadena, United States"],"email":"pklein@caltech.edu","is_corresponding":false,"name":"Patrice Klein"},{"affiliations":["Utah State University Blanding, Blanding, United States"],"email":"gustavo.ovando@usu.edu","is_corresponding":false,"name":"Gustavo Ovando-Montejo"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"pascucci.valerio@gmail.com","is_corresponding":false,"name":"Valerio Pascucci"}],"award":"","doi":"","event_id":"a-ldav","event_title":"LDAV: 13th IEEE Symposium on Large Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"a-ldav-1006","image_caption":"We provide unprecedented equitable access to massive data via our novel data fabric abstraction enabled by dashboards on commodity desktop computers with a simple weblink for everyone from top NASA scientists to students in disadvantaged communities to the general public. This image shows a field called Eastward Wind Velocity (U), combined together from a cubed-sphere grid. 

","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"https://arxiv.org/abs/2408.11831v1","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated4","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization","session_uid":"a-ldav","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization"],"time_stamp":"2024-10-13T16:00:00Z","title":"Web-based Visualization and Analytics of Petascale data: Equity as a Tide that Lifts All Boats","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-ldav-1011","abstract":"This paper describes the adaptation of a well-scaling parallel algorithm for computing Morse-Smale segmentations based on path compression to a distributed computational setting. Additionally, we extend the algorithm to efficiently compute connected components in distributed structured and unstructured grids, based either on the connectivity of the underlying mesh or a feature mask. Our implementation is seamlessly integrated with the distributed extension of the Topology ToolKit (TTK), ensuring robust performance and scalability. To demonstrate the practicality and efficiency of our algorithms, we conducted a series of scaling experiments on large-scale datasets, with sizes of up to 4096^3 vertices on up to 64 nodes and 768 cores.","accessible_pdf":false,"authors":[{"affiliations":["RPTU Kaiserslautern-Landau, Kaiserslautern, Germany"],"email":"mswill@rhrk.uni-kl.de","is_corresponding":true,"name":"Michael Will"},{"affiliations":["RPTU Kaiserslautern-Landau, Kaiserslautern, Germany"],"email":"jl@jluk.de","is_corresponding":false,"name":"Jonas Lukasczyk"},{"affiliations":["CNRS, Paris, France","Sorbonne Universit\u00e9, Paris, France"],"email":"julien.tierny@sorbonne-universite.fr","is_corresponding":false,"name":"Julien Tierny"},{"affiliations":["RPTU Kaiserslautern-Landau, Kaiserslautern, Germany"],"email":"garth@rptu.de","is_corresponding":false,"name":"Christoph Garth"}],"award":"","doi":"","event_id":"a-ldav","event_title":"LDAV: 13th IEEE Symposium on Large Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"a-ldav-1011","image_caption":"Left: an illustration of using path compression to quickly compute the ascending / descending segmentations. Right: Illustrating the use of Connected Component extraction for data segmentation. 
Running these computations on multiple nodes allows us to use much larger datasets by using the distributed memory of all the nodes.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/a-ldav/a-ldav-1011/a-ldav-1011_Preview.mp4?token=YIVcce37MaN-COCzsAbFkZ34Fu6IKgmMjjUjQIjzuRA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/a-ldav/a-ldav-1011/a-ldav-1011_Preview.srt?token=v3vbBEIZLjIo9y9Trcw7LtJm2WnYO6W32DFzV3Q5W1Q&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated4","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization","session_uid":"a-ldav","session_youtube_ff_id":"_9KgKT3__LM","session_youtube_ff_link":"https://youtu.be/_9KgKT3__LM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization"],"time_stamp":"2024-10-13T16:00:00Z","title":"Distributed Path Compression for Piecewise Linear Morse-Smale Segmentations and Connected Components","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-ldav-1016","abstract":"We propose and discuss a paradigm that allows for expressing data- parallel rendering with the classically non-parallel ANARI API. We propose this as a new standard for data-parallel rendering, describe two different implementations of this paradigm, and use multiple sample integrations into existing applications to show how easy it is to adopt, and what can be gained from doing so.","accessible_pdf":false,"authors":[{"affiliations":["NVIDIA, Salt Lake City, United States"],"email":"ingowald@gmail.com","is_corresponding":true,"name":"Ingo Wald"},{"affiliations":["University of Cologne, Cologne, Germany"],"email":"zellmann@uni-koeln.de","is_corresponding":false,"name":"Stefan Zellmann"},{"affiliations":["NVIDIA, Austin, United States"],"email":"jeffamstutz@gmail.com","is_corresponding":false,"name":"Jefferson Amstutz"},{"affiliations":["University of California, Davis, Davis, United States"],"email":"qadwu@ucdavis.edu","is_corresponding":false,"name":"Qi Wu"},{"affiliations":["NVIDIA, Santa Clara, United States"],"email":"kgriffin@nvidia.com","is_corresponding":false,"name":"Kevin Shawn Griffin"},{"affiliations":["VSB - Technical University of Ostrava, Ostrava, Czech Republic"],"email":"milan.jaros@vsb.cz","is_corresponding":false,"name":"Milan Jaro\u0161"},{"affiliations":["University of Cologne, Cologne, Germany"],"email":"wesner@uni-koeln.de","is_corresponding":false,"name":"Stefan Wesner"}],"award":"","doi":"","event_id":"a-ldav","event_title":"LDAV: 13th IEEE Symposium on Large Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"a-ldav-1016","image_caption":"Several examples of large sci-vis data being rendered using the data-parallel ANARI paradigm proposed in this paper. From left to right: a) Roughly one billion color-mapped spheres, rendered using HayStack and BANARI. b) The roughly 500GB DNS data set, with volume path tracing on 128 GPUs, also using HayStack and BANARI. 
c) An iso-surface rendered during an in-situ Ascent session, while attached to an S3D simulation. d) ParaView performing data-parallel rendering on the airplane data set, using our data-parallel ANARI integration in pvserver. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated4","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization","session_uid":"a-ldav","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization"],"time_stamp":"2024-10-13T16:00:00Z","title":"Standardized Data-Parallel Rendering Using ANARI","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-ldav-1018","abstract":"Functional approximation as a high-order continuous representation provides a more accurate value and gradient query compared to the traditional discrete volume representation. Volume visualization directly rendered from functional approximation generates high-quality rendering results without high-order artifacts caused by trilinear interpolations. However, querying an encoded functional approximation is computationally expensive, especially when the input dataset is large, making functional approximation impractical for interactive visualization. In this paper, we proposed a novel functional approximation multi-resolution representation, Adaptive-FAM, which is lightweight and fast to query. We also design a GPU-accelerated out-of-core multi-resolution volume visualization framework that directly utilizes the Adaptive-FAM representation to generate high-quality rendering with interactive responsiveness. Our method can not only dramatically decrease the caching time, one of the main contributors to input latency, but also effectively improve the cache hit rate through prefetching. Our approach significantly outperforms the traditional function approximation method in terms of input latency while maintaining comparable rendering quality.","accessible_pdf":true,"authors":[{"affiliations":["University of Nebraska-Lincoln, Lincoln, United States"],"email":"jianxin.sun@huskers.unl.edu","is_corresponding":true,"name":"Jianxin Sun"},{"affiliations":["Argonne National Laboratory, Lemont, United States"],"email":"dlenz@anl.gov","is_corresponding":false,"name":"David Lenz"},{"affiliations":["University of Nebraska-Lincoln, Lincoln, United States"],"email":"yu@cse.unl.edu","is_corresponding":false,"name":"Hongfeng Yu"},{"affiliations":["Argonne National Laboratory, Lemont, United States"],"email":"tpeterka@mcs.anl.gov","is_corresponding":false,"name":"Tom Peterka"}],"award":"","doi":"","event_id":"a-ldav","event_title":"LDAV: 13th IEEE Symposium on Large Data Analysis and Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"a-ldav-1018","image_caption":"Adaptive-FAM is a novel functional approximation multi-resolution representation that is lightweight and fast to query. 
A GPU-accelerated out-of-core multi-resolution volume visualization framework is designed to directly utilize the Adaptive-FAM representation to generate high-quality rendering with interactive responsiveness. Our method can not only dramatically decrease the caching time, one of the main contributors to input latency, but also effectively improve the cache hit rate through prefetching. Our approach significantly outperforms the traditional function approximation method in terms of input latency while maintaining comparable rendering quality. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"https://arxiv.org/pdf/2409.00184","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/a-ldav/a-ldav-1018/a-ldav-1018_Preview.mp4?token=GL1VSSAmjgz3CWILLa9K7nXfw-Zi9CzBXtYPWYBHkJU&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated4","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization","session_uid":"a-ldav","session_youtube_ff_id":"XCfEmhA78EI","session_youtube_ff_link":"https://youtu.be/XCfEmhA78EI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["LDAV: 14th IEEE Symposium on Large Data Analysis and Visualization"],"time_stamp":"2024-10-13T16:00:00Z","title":"Adaptive Multi-Resolution Encoding for Interactive Large-Scale Volume Visualization through Functional Approximation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"s-vds-1000","abstract":"Efficient public transport systems are crucial for sustainable urban development as cities face increasing mobility demands. Yet, many public transport networks struggle to meet diverse user needs due to historical development, urban constraints, and financial limitations. Traditionally, planning of transport network structure is often based on limited surveys, expert opinions, or partial usage statistics. This provides an incomplete basis for decision-making. We introduce an data-driven approach to public transport planning and optimization, calculating detailed accessibility measures at the individual housing level. Our visual analytics workflow combines population-group-based simulations with dynamic infrastructure analysis, utilizing a scenario-based model to simulate daily travel patterns of varied demographic groups, including schoolchildren, students, workers, and pensioners. These population groups, each with unique mobility requirements and routines, interact with the transport system under different scenarios traveling to and from Points of Interest (POI), assessed through travel time calculations. Results are visualized through heatmaps, density maps, and network overlays, as well as detailed statistics. Our system allows us to analyze both the underlying data and simulation results on multiple levels of granularity, delivering both broad insights and granular details. Case studies with the city of Konstanz, Germany reveal key areas where public transport does not meet specific needs, confirmed through a formative user study. 
Due to the high cost of changing legacy networks, our analysis facilitates the identification of strategic enhancements, such as optimized schedules or rerouting, and few targeted stop relocations, highlighting consequential variations in accessibility to pinpointing critical service gaps. Our research advances urban transport analytics by providing policymakers and citizens with a system that delivers both broad insights with granular detail into public transport services for a data-driven quality assessment at housing-level detail.","accessible_pdf":true,"authors":[{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"yannick.metz@uni-konstanz.de","is_corresponding":false,"name":"Yannick Metz"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"dennis-fabian.ackermann@uni-konstanz.de","is_corresponding":false,"name":"Dennis Ackermann"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"keim@uni-konstanz.de","is_corresponding":false,"name":"Daniel Keim"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"max.fischer@uni-konstanz.de","is_corresponding":true,"name":"Maximilian T. Fischer"}],"award":"","doi":"","event_id":"s-vds","event_title":"VDS: Visualization in Data Science Symposium","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"s-vds-1000","image_caption":"Advancing urban transport infrastructure analysis through an interactive simulation framework of Mobility Profiles. This method integrates multi-source open data sources and integrates them with network flow simulations, encapsulated within an enriched map visualization to assess the quality - i.e. connectedness and travel times - of public transport at housing-level detail. Users can dynamically alter and explore mobility scenarios for various demographics, control the analysis through several components, and enhance the results with contextual background and network information. This enables interactive, systematic comparisons against diverse operational assumptions.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"https://arxiv.org/abs/2407.10791","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VDS: Visualization in Data Science Symposium","session_uid":"s-vds","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VDS: Visualization in Data Science Symposium"],"time_stamp":"2024-10-13T16:55:00Z","title":"Interactive Public Transport Infrastructure Analysis through Mobility Profiles: Making the Mobility Transition Transparent","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"s-vds-1002","abstract":"This position paper explores the interplay between automation and human involvement in data science. It synthesizes perspectives from Automated Data Science (AutoDS) and Interactive Data Visualization (VIS), which traditionally represent opposing ends of the human-machine spectrum. 
While AutoDS aims to enhance efficiency by reducing human tasks, VIS emphasizes the importance of nuanced understanding, innovation, and context provided by human involvement. This paper examines these dichotomies through an online survey and advocates for a balanced approach that harmonizes the efficiency of automation with the irreplaceable insights of human expertise. Ultimately, we address the essential question of not just what we can automate, but what we should automate, seeking strategies that prioritize technological advancement alongside the fundamental need for human oversight.","accessible_pdf":false,"authors":[{"affiliations":["Tufts University, Boston, United States"],"email":"jen@cs.tufts.edu","is_corresponding":true,"name":"Jen Rogers"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, INRIA, Orsay, France"],"email":"mehdi.chakhchoukh@universite-paris-saclay.fr","is_corresponding":false,"name":"Mehdi Chakhchoukh"},{"affiliations":["Leiden Universiteit, Leiden, Netherlands"],"email":"anastacio@aim.rwth-aachen.de","is_corresponding":false,"name":"Marie Anastacio"},{"affiliations":["Tulane University, New Orleans, United States"],"email":"rfaust1@tulane.edu","is_corresponding":false,"name":"Rebecca Faust"},{"affiliations":["University of Warwick, Coventry, United Kingdom"],"email":"cagatay.turkay@warwick.ac.uk","is_corresponding":false,"name":"Cagatay Turkay"},{"affiliations":["University of Wyoming, Laramie, United States"],"email":"larsko@uwyo.edu","is_corresponding":false,"name":"Lars Kotthoff"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"steffen.koch@vis.uni-stuttgart.de","is_corresponding":false,"name":"Steffen Koch"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"andreas.kerren@liu.se","is_corresponding":false,"name":"Andreas Kerren"},{"affiliations":["University of Zurich, Zurich, Switzerland"],"email":"bernard@ifi.uzh.ch","is_corresponding":false,"name":"J\u00fcrgen Bernard"}],"award":"","doi":"","event_id":"s-vds","event_title":"VDS: Visualization in Data Science Symposium","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"s-vds-1002","image_caption":"The tug-of-war between automation and human involvement in data science: As automation technology advances, the balance between human intuition and machine efficiency becomes increasingly critical. Accessibility Description: An illustration of a tug-of-war between a robot on one side and three human figures on the other. The robot, representing automation, pulls one end of a rope while the human figures, symbolizing human involvement, pull from the opposite side. 
The image conveys the tension between automated processes and human input in data science.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/s-vds/s-vds-1002/s-vds-1002_Preview.mp4?token=95WeM8irbKnh9Du-lE9XJTq36pdxWIujIGgIGk3Z1n4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/s-vds/s-vds-1002/s-vds-1002_Preview.srt?token=8fssMCCBdK9YKDz1Yu5pDc-mAEXHleiX_h-nOSWcRrc&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VDS: Visualization in Data Science Symposium","session_uid":"s-vds","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VDS: Visualization in Data Science Symposium"],"time_stamp":"2024-10-13T16:45:00Z","title":"Visualization and Automation in Data Science: Exploring the Paradox of Humans-in-the-Loop","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"s-vds-1007","abstract":"Categorical data does not have an intrinsic definition of distance or order, and therefore, established visualization techniques for categorical data only allow for a set-based or frequency-based analysis, e.g., through Euler diagrams or Parallel Sets, and do not support a similarity-based analysis. We present a novel dimensionality reduction-based visualization for categorical data, which is based on defining the distance of two data items as the number of varying attributes. Our technique enables users to pre-attentively detect groups of similar data items and observe the properties of the projection, such as attributes strongly influencing the embedding. Our prototype visually encodes data properties in an enhanced scatterplot-like visualization, visualizing attributes in the background to show the distribution of categories. In addition, we propose two graph-based measures to quantify the plot's visual quality, which rank attributes according to their contribution to cluster cohesion. To demonstrate the capabilities of our similarity-based projection method, we compare it to Euler diagrams and Parallel Sets regarding visual scalability and evaluate it quantitatively on seven real-world datasets using a range of common quality measures. Further, we validate the benefits of our approach through an expert study with five data scientists analyzing the Titanic and Mushroom dataset with up to 23 attributes and 8124 category combinations. Our results indicate that our Categorical Data Map offers an effective analysis method for large datasets with a high number of category combinations.","accessible_pdf":false,"authors":[{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"frederik.dennig@uni-konstanz.de","is_corresponding":true,"name":"Frederik L. 
Dennig"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"lucas.joos@uni-konstanz.de","is_corresponding":false,"name":"Lucas Joos"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"patrick.paetzold@uni-konstanz.de","is_corresponding":false,"name":"Patrick Paetzold"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"blumbergdaniela@gmail.com","is_corresponding":false,"name":"Daniela Blumberg"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"oliver.deussen@uni-konstanz.de","is_corresponding":false,"name":"Oliver Deussen"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"keim@uni-konstanz.de","is_corresponding":false,"name":"Daniel Keim"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"max.fischer@uni-konstanz.de","is_corresponding":false,"name":"Maximilian T. Fischer"}],"award":"","doi":"","event_id":"s-vds","event_title":"VDS: Visualization in Data Science Symposium","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"s-vds-1007","image_caption":"The Categorical Data Map enables projection-based analysis of categorical data here exemplified by the Property Sales dataset with MDS using the Jaccard coefficient: (1) shows 10 groups without layout enrichment. (2) shows a clear separation between Private Property vs Public Property. (3) indicates boundaries and symmetries for the Location of Purchased Property attribute, while in (4), the Property Type Purchased contributes the least to the clusters. The glyph sizes encode the subset sizes, revealing that categories Private Propriety and Central often occur together.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"https://arxiv.org/abs/2404.16044","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VDS: Visualization in Data Science Symposium","session_uid":"s-vds","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VDS: Visualization in Data Science Symposium"],"time_stamp":"2024-10-13T17:45:00Z","title":"The Categorical Data Map: A Multidimensional Scaling-Based Approach","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"s-vds-1013","abstract":"Clustering is an essential technique across various domains, such as data science, machine learning, and eXplainable Artificial Intelligence.Information visualization and visual analytics techniques have been proven to effectively support human involvement in the visual exploration of clustered data to enhance the understanding and refinement of cluster assignments. This paper presents an attempt of a deep and exhaustive evaluation of the perceptive aspects of clustering quality metrics, focusing on the Davies-Bouldin Index, Dunn Index, Calinski-Harabasz Index, and Silhouette Score. Our research is centered around two main objectives: a) assessing the human perception of common CVIs in 2D scatterplots and b) exploring the potential of Large Language Models (LLMs), in particular GPT-4o, to emulate the assessed human perception. 
By discussing the obtained results and highlighting limitations and areas for further exploration, this paper aims to propose a foundation for future research activities.","accessible_pdf":false,"authors":[{"affiliations":["Sapienza University of Rome, Rome, Italy"],"email":"blasilli@diag.uniroma1.it","is_corresponding":true,"name":"Graziano Blasilli"},{"affiliations":["Northeastern University, Boston, United States"],"email":"kerrigan.d@northeastern.edu","is_corresponding":false,"name":"Daniel Kerrigan"},{"affiliations":["Northeastern University, Boston, United States"],"email":"e.bertini@northeastern.edu","is_corresponding":false,"name":"Enrico Bertini"},{"affiliations":["Sapienza University of Rome, Rome, Italy"],"email":"santucci@diag.uniroma1.it","is_corresponding":false,"name":"Giuseppe Santucci"}],"award":"","doi":"","event_id":"s-vds","event_title":"VDS: Visualization in Data Science Symposium","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"s-vds-1013","image_caption":"This paper presents the first attempt at a deep and exhaustive evaluation of the perceptive aspects of clustering quality metrics, focusing on the Davies-Bouldin Index, Dunn Index, Calinski-Harabasz Index, and Silhouette Score. Our research is centered around two main objectives: a) assessing the human perception of the metrics in 2D scatterplots and b) exploring the potential of Large Multimodal Models, in particular GPT-4o, to emulate the assessed human perception.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VDS: Visualization in Data Science Symposium","session_uid":"s-vds","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VDS: Visualization in Data Science Symposium"],"time_stamp":"2024-10-13T17:05:00Z","title":"Towards a Visual Perception-Based Analysis of Clustering Quality Metrics","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"s-vds-1021","abstract":"Recommender systems have become integral to digital experiences, shaping user interactions and preferences across various platforms. Despite their widespread use, these systems often suffer from algorithmic biases that can lead to unfair and unsatisfactory user experiences. This study introduces an interactive tool designed to help users comprehend and explore the impacts of algorithmic harms in recommender systems. By leveraging visualizations, counterfactual explanations, and interactive modules, the tool allows users to investigate how biases such as miscalibration, stereotypes, and filter bubbles affect their recommendations. Informed by in-depth user interviews, both general users and researchers can benefit from increased transparency and personalized impact assessments, ultimately fostering a better understanding of algorithmic biases and contributing to more equitable recommendation outcomes. 
This work provides valuable insights for future research and practical applications in mitigating bias and enhancing fairness in machine learning algorithms.","accessible_pdf":false,"authors":[{"affiliations":["University of Pittsburgh, Pittsburgh, United States"],"email":"yongsu.ahn@pitt.edu","is_corresponding":true,"name":"Yongsu Ahn"},{"affiliations":["School of Computing and Information, University of Pittsburgh, Pittsburgh, United States"],"email":"quinnkwolter@gmail.com","is_corresponding":false,"name":"Quinn K Wolter"},{"affiliations":["Quest Diagnostics, Pittsburgh, United States"],"email":"jonilyndick@gmail.com","is_corresponding":false,"name":"Jonilyn Dick"},{"affiliations":["Quest Diagnostics, Pittsburgh, United States"],"email":"janetad99@gmail.com","is_corresponding":false,"name":"Janet Dick"},{"affiliations":["University of Pittsburgh, Pittsburgh, United States"],"email":"yurulin@pitt.edu","is_corresponding":false,"name":"Yu-Ru Lin"}],"award":"","doi":"","event_id":"s-vds","event_title":"VDS: Visualization in Data Science Symposium","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"s-vds-1021","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/s-vds/s-vds-1021/s-vds-1021_Preview.mp4?token=Gd9gotKCklMf66hdSrwI7pFZqqLfFnl_iLa5KU-lz50&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/s-vds/s-vds-1021/s-vds-1021_Preview.srt?token=XCnK0MnML8SnYP-fJnKGuB59POVVoqT0E1qUtcyFcQA&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VDS: Visualization in Data Science Symposium","session_uid":"s-vds","session_youtube_ff_id":"Q4PRivOX2CQ","session_youtube_ff_link":"https://youtu.be/Q4PRivOX2CQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VDS: Visualization in Data Science Symposium"],"time_stamp":"2024-10-13T17:55:00Z","title":"Interactive Counterfactual Exploration of Algorithmic Harms in Recommender Systems","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"s-vds-1029","abstract":"This position paper discusses the profound impact of Large Language Models (LLMs) on semantic change, emphasizing the need for comprehensive monitoring and visualization techniques. Building on established concepts from linguistics, we examine the interdependency between mental and language models, discussing how LLMs influence and are influenced by human cognition and societal context. We introduce three primary theories to conceptualize such influences: Recontextualization, Standardization, and Semantic Dementia, illustrating how LLMs drive, standardize, and potentially degrade language semantics.Our subsequent review categorizes methods for visualizing semantic change into frequency-based, embedding-based, and context-based techniques, being first in assessing their effectiveness in capturing linguistic evolution: Embedding-based methods are highlighted as crucial for a detailed semantic analysis, reflecting both broad trends and specific linguistic changes. 
We underscore the need for novel visual, interactive tools to monitor and explain semantic changes induced by LLMs, ensuring the preservation of linguistic diversity and mitigating linguistic biases. This work provides essential insights for future research on semantic change visualization and the dynamic nature of language evolution in the times of LLMs.","accessible_pdf":false,"authors":[{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"raphael.buchmueller@uni-konstanz.de","is_corresponding":true,"name":"Raphael Buchm\u00fcller"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"friederike.koerte@uni-konstanz.de","is_corresponding":false,"name":"Friederike K\u00f6rte"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"keim@uni-konstanz.de","is_corresponding":false,"name":"Daniel Keim"}],"award":"","doi":"","event_id":"s-vds","event_title":"VDS: Visualization in Data Science Symposium","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"s-vds-1029","image_caption":"Hi, and thanks for joining. In a nutshell, our research looks at how Large Language Models are reshaping the conceptual framework of our language. While language change has traditionally been driven by socio-linguistic factors like metaphorization, we introduce three new ideas: recontextualization, standardization, and what we call semantic dementia. Using visual analytics, we can track these shifts to preserve linguistic diversity and reduce bias. We review key methods, like embedding-based techniques, to detect and explain these changes. In the end, we call for new visualization tools to better understand how LLMs are impacting our language. Thanks for watching.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/s-vds/s-vds-1029/s-vds-1029_Preview.mp4?token=t9ktK8xhtnb0aavzIMa18Y4uVFntG62g5EOSZEZ1gfE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/s-vds/s-vds-1029/s-vds-1029_Preview.srt?token=PAuO0koDnAkjI8Xm8CFJWMVlQws48T_83jbrIiX_mHU&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"VDS: Visualization in Data Science Symposium","session_uid":"s-vds","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VDS: Visualization in Data Science Symposium"],"time_stamp":"2024-10-13T18:05:00Z","title":"Seeing the Shift: Keep an Eye on Semantic Changes in Times of LLMs","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1008","abstract":"Stress is among the most commonly employed quality metrics and optimization criteria for dimension reduction projections of high-dimensional data. Complex, high-dimensional data is ubiquitous across many scientific disciplines, including machine learning, biology, and the social sciences. One of the primary methods of visualizing these datasets is with two-dimensional scatter plots that visually capture some properties of the data. 
Because visually determining the accuracy of these plots is challenging, researchers often use quality metrics to measure the projection\u2019s accuracy or faithfulness to the full data. One of the most commonly employed metrics, normalized stress, is sensitive to uniform scaling (stretching, shrinking) of the projection, despite this act not meaningfully changing anything about the projection. We investigate the effect of scaling on stress and other distance-based quality metrics analytically and empirically by showing just how much the values change and how this affects dimension reduction technique evaluations. We introduce a simple technique to make normalized stress scale-invariant and show that it accurately captures expected behavior on a small benchmark.","accessible_pdf":true,"authors":[{"affiliations":["University of Arizona, Tucson, United States"],"email":"ksmelser@arizona.edu","is_corresponding":false,"name":"Kiran Smelser"},{"affiliations":["University of Arizona, Tucson, United States"],"email":"jacobmiller1@arizona.edu","is_corresponding":true,"name":"Jacob Miller"},{"affiliations":["University of Arizona, Tucson, United States"],"email":"stephen.kobourov@tum.de","is_corresponding":false,"name":"Stephen Kobourov"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1008","image_caption":"MDS, t-SNE, and RND (random) embeddings of the well-known Iris dataset from left to right (bottom). The plot (top) shows the values of the normalized stress metric for these three embeddings and clearly illustrates the sensitivity to scale. As one uniformly scales the embeddings to be larger or smaller, the value of normalized stress changes. Notably, at different scales, different embeddings have lower stress, including the absurd situation where the random embedding has the lowest stress (beyond scale 9). Moreover, the expected order of MDS, t-SNE, RND is only found briefly at a scalar value slightly greater than 0.25 (hardly visible in the plot), and all six different algorithm orders can be found by selecting different scales. 
","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2408.07724","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/KUbbe0PguKY&t=1h58m57s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-beliv/w-beliv-1008/w-beliv-1008_Preview.mp4?token=Um2oJjnT2y3HO9IljRJKeSSUa93W3n11qrkN3dI3dAs&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-beliv/w-beliv-1008/w-beliv-1008_Preview.srt?token=okgeJ3fLQmb9x7DcRRyrZCL_4cYRm7_qRC2KHtcoOXY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6a","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)","session_uid":"w-beliv","session_youtube_ff_id":"p1bNgrfXToY","session_youtube_ff_link":"https://youtu.be/p1bNgrfXToY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KUbbe0PguKY&t=1h58m57s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)"],"time_stamp":"2024-10-14T12:30:00Z","title":"Normalized Stress is Not Normalized: How to Interpret Stress Correctly","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1015","abstract":"Empirical studies in visualisation often compare visual representations to identify the most effective visualisation for a particular visual judgement or decision making task. However, the effectiveness of a visualisation may be intrinsically related to, and difficult to distinguish from, factors such as visualisation literacy. Complicating matters further, visualisation literacy itself is not a singular intrinsic quality, but can be a result of several distinct challenges that a viewer encounters when performing a task with a visualisation. In this paper, we describe how such challenges apply to experiments that we use to evaluate visualisations, and discuss a set of considerations for designing studies in the future. Finally, we argue that aspects of the study design which are often neglected or overlooked (such as the onboarding of participants, tutorials, training etc.) can have a big role in the results of a study and can potentially impact the conclusions that the researchers can draw from the study.","accessible_pdf":false,"authors":[{"affiliations":["Northwestern University, Evanston, United States"],"email":"abhraneel@u.northwestern.edu","is_corresponding":true,"name":"Abhraneel Sarma"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"shenglong@u.northwestern.edu","is_corresponding":false,"name":"Sheng Long"},{"affiliations":["Northeastern University, Portland, United States"],"email":"m.correll@northeastern.edu","is_corresponding":false,"name":"Michael Correll"},{"affiliations":["Northwestern University, Chicago, United States"],"email":"matthew.kay@gmail.com","is_corresponding":false,"name":"Matthew Kay"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1015","image_caption":"The 'Telephone' framework describes two possible pathways of participants\u2019 behaviour in experiments. 
In the desired pathway, a user performs the experimental task using the optimal strategy, allowing the researcher to estimate a measure of visualisation effectiveness. However, this desired pathway may not always manifest in practice. What an experiment instead might be measuring is described through the alternative pathway\u2014a user performs what they think the task is, using a strategy which they think best supports this perceived task; the experiment is actually measuring how well the visualisation supports a user in performing their perceived task using their perceived optimal strategy.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://osf.io/preprints/osf/d849a","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/KUbbe0PguKY&t=1h47m58s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6a","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KUbbe0PguKY&t=1h47m58s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)"],"time_stamp":"2024-10-14T12:30:00Z","title":"Tasks and Telephone: Understanding Barriers to Inference due to Issues in Experiment Design","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1016","abstract":"This position paper critically examines the graphical inference framework for evaluating visualizations using the lineup task. We present a re-analysis of lineup task data using signal detection theory, applying four Bayesian non-linear models to investigate whether color ramps with more color name variation increase false discoveries. Our study utilizes data from Reda and Szafir\u2019s previous work [20], corroborating their findings while providing additional insights into sensitivity and bias differences across colormaps and individuals. We suggest improvements to lineup study designs and explore the connections between graphical inference, signal detection theory, and statistical decision theory. Our work contributes a more perceptually grounded approach for assessing visualization effectiveness and offers a path forward for better aligning graphical inference methods with human cognition. The results have implications for the development and evaluation of visualizations, particularly for exploratory data analysis scenarios. Supplementary materials are available at https://osf.io/xd5cj/.","accessible_pdf":false,"authors":[{"affiliations":["Northwestern University, Evanston, United States"],"email":"shenglong@u.northwestern.edu","is_corresponding":true,"name":"Sheng Long"},{"affiliations":["Northwestern University, Chicago, United States"],"email":"matthew.kay@gmail.com","is_corresponding":false,"name":"Matthew Kay"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1016","image_caption":"The image connects a visual lineup task with signal detection theory. 
It shows a lineup of multivariate images where participants identify if one differs or if there's \"no discernible difference.\" Signal detection theory analyzes this data, assuming perceptual evidence for signal presence/absence as overlapping probability distributions. This quantifies observer sensitivity and decision criterion, separating perceptual sensitivity from response bias. The graphs illustrate concepts like false alarm rate, hit rate, and sensitivity (d'), demonstrating how the theory applies to perceptual decision-making in visual discrimination tasks.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://osf.io/preprints/osf/ghru8","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/KUbbe0PguKY&t=1h36m44s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6a","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KUbbe0PguKY&t=1h36m44s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)"],"time_stamp":"2024-10-14T12:30:00Z","title":"Old Wine in a New Bottle? Analysis of Visual Lineups with Signal Detection Theory","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1018","abstract":"Visualising personal experiences is often described as a means for self-reflection, shaping one\u2019s identity, and sharing it with others. In policymaking, personal narratives are regarded as an important source of intelligence to shape public discourse andpolicy. Therefore, policymakers are interested in the interplay between individual-level experiences and macro-political processes that play into shaping these experiences. In this context, visualisation is regarded as a medium for advocacy, creating a power balance between individuals and the power structures that influence their health and well-being. In this paper, we offer a politically-framed reflection on how visualisation creators define lived experience data, and what design choices they make for visualising them. We identify data characteristics and design choices that enable visualisation authors and consumers to engage in a process of narrative co-construction, while navigating structural forms of inequality. 
Our political framing is driven by ideas of master and alternative narratives from Diversity Science, in which authors and narrators engage in a process of negotiation with power structures to either maintain or challenge the status quo.","accessible_pdf":true,"authors":[{"affiliations":["City, University of London, London, United Kingdom"],"email":"mai.elshehaly@city.ac.uk","is_corresponding":true,"name":"Mai Elshehaly"},{"affiliations":["City, University of London, London, United Kingdom"],"email":"mirela.reljan-delaney@city.ac.uk","is_corresponding":false,"name":"Mirela Reljan-Delaney"},{"affiliations":["City, University of London, London, United Kingdom"],"email":"j.dykes@city.ac.uk","is_corresponding":false,"name":"Jason Dykes"},{"affiliations":["City, University of London, London, United Kingdom"],"email":"a.slingsby@city.ac.uk","is_corresponding":false,"name":"Aidan Slingsby"},{"affiliations":["City, University of London, London, United Kingdom"],"email":"j.d.wood@city.ac.uk","is_corresponding":false,"name":"Jo Wood"},{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"sam.spiegel@ed.ac.uk","is_corresponding":false,"name":"Sam Spiegel"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1018","image_caption":"The Master Narrative Framework for Visualization, which may be useful for exposing Master narratives, developing Alternative narratives and establishing Personal narratives in visualization design, critique and education. Adapted from Syed and McLean [42]. We argue that the contrast between master, alternative, and personal narratives can better define the role of visualisation in advocacy and shaping policy. We use Wee People in this figure, a typeface of people silhouettes https://github.com/propublica/weepeople .","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://invisai.com/mai/papers/Visualising_Lived_Experience-beliv2024.pdf","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/KUbbe0PguKY&t=1h7m52s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6a","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KUbbe0PguKY&t=1h7m52s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)"],"time_stamp":"2024-10-14T12:30:00Z","title":"Visualising Lived Experience: Learning from a Master and Alternative Narrative Framing","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1020","abstract":"The generation and presentation of counterfactual explanations (CFEs) are a commonly used, model-agnostic approach to helping end-users reason about the validity of AI/ML model outputs. 
By demonstrating how sensitive the model's outputs are to minor variations, CFEs are thought to improve understanding of the model's behavior, identify potential biases, and increase the transparency of 'black box models'. Here, we examine how CFEs support a diverse audience, both with and without technical expertise, to understand the results of an LLM-informed sentiment analysis. We conducted a preliminary pilot study with ten individuals with varied expertise, ranging from NLP, ML, and ethics to specific domains. All individuals were actively using or working with AI/ML technology as part of their daily jobs. Through semi-structured interviews grounded in a set of concrete examples, we examined how CFEs influence participants' perceptions of the model's correctness, fairness, and trustworthiness, and how visualization of CFEs specifically influences those perceptions. We also surface how participants wrestle with their internal definitions of 'explainability', relative to what CFEs present, their cultures, and backgrounds, in addition to the much more widely studied phenomenon of comparing their baseline expectations of the model's performance. Compared to prior research, our findings highlight the sociotechnical frictions that CFEs surface but do not necessarily remedy. We conclude with the design implications of developing transparent AI/ML visualization systems for more general tasks.","accessible_pdf":false,"authors":[{"affiliations":["Tableau Research, Seattle, United States"],"email":"amcrisan@uwaterloo.ca","is_corresponding":true,"name":"Anamaria Crisan"},{"affiliations":["Tableau Software, Seattle, United States"],"email":"nbutters@salesforce.com","is_corresponding":false,"name":"Nathan Butters"},{"affiliations":["Tableau Software, Seattle, United States"],"email":"zoezoezoe.cc@gmail.com","is_corresponding":false,"name":"Zoe Zoe"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-beliv-1020","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/KUbbe0PguKY&t=1h20m52s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6a","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KUbbe0PguKY&t=1h20m52s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)"],"time_stamp":"2024-10-14T12:30:00Z","title":"Exploring Subjective Notions of Explainability through Counterfactual Visualization of Sentiment Analysis","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1037","abstract":"Qualitative data analysis is widely adopted for user evaluation, not only in the Visualisation community but also in related communities, such as Human-Computer Interaction and Augmented and Virtual Reality. 
However, the data analysis process is often not clearly described and the results are often simply listed in the form of interesting quotes from or summaries of quotes that were uttered by study participants. This position paper proposes an early concept for the use of a researcher as an \u201cAdvocatus Diaboli\u201d, or devil\u2019s advocate, to try to disprove the results of the data analysis by looking for quotes that contradict the findings or leading questions and task designs. Whatever this devil\u2019s advocate finds can then be used to reiterate on the findings and the analysis process to form more suitable theories. On the other hand, researchers are enabled to clarify why they did not include this in their theory. This process could increase transparency in the qualitative data analysis process and increase trust in these findings, while being mindful of the necessary resources.","accessible_pdf":false,"authors":[{"affiliations":["University of Applied Sciences Upper Austria, Hagenberg, Austria"],"email":"judith.friedl-knirsch@fh-hagenberg.at","is_corresponding":true,"name":"Judith Friedl-Knirsch"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1037","image_caption":"A sketch of the Advocatus Diaboli process for qualitative data analysis. First, the primary researcher analyses the collected data. Then a secondary researcher assumes the position of an Advocatus Diaboli and attempts to disprove the findings of the primary researcher based on the collected data. Finally, both researchers discuss the findings of the Advocatus Diaboli and adapt the results if necessary.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/KUbbe0PguKY&t=0h57m3s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6a","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KUbbe0PguKY&t=0h57m3s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Session 1)"],"time_stamp":"2024-10-14T12:30:00Z","title":"Position paper: Proposing the use of an \u201cAdvocatus Diaboli\u201d as a pragmatic approach to improve transparency in qualitative data analysis and reporting","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1001","abstract":"I analyze the evolution of papers certified by the Graphics Replicability Stamp Initiative (GRSI) to be reproducible, with a specific focus on the subset of publications that address visualization-related topics. With this analysis I show that, while the number of papers is increasing overall and within the visualization field, we still have to improve quite a bit to escape the replication crisis. 
I base my analysis on the data published by the GRSI as well as publication data for the different venues in visualization and lists of journal papers that have been presented at visualization-focused conferences. I also analyze the differences between the involved journals as well as the percentage of reproducible papers in the different presentation venues. Furthermore, I look at the authors of the publications and, in particular, their affiliation countries to see where most reproducible papers come from. Finally, I discuss potential reasons for the low reproducibility numbers and suggest possible ways to overcome these obstacles. This paper is reproducible itself, with source code and data available from github.com/tobiasisenberg/Visualization-Reproducibility as well as a free paper copy and all supplemental materials at osf.io/mvnbj.","accessible_pdf":false,"authors":[{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"tobias.isenberg@gmail.com","is_corresponding":true,"name":"Tobias Isenberg"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1001","image_caption":"In my paper I analyze the evolution of reproducible contributions to the graphics and visualization fields as certified by the Graphics Replicability Stamp Initiative. I focus specifically on the visualization field and discuss reasons for the still relatively low counts of reproducible papers.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2408.03889","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/iXPK-CWIysQ&t=1h6m53s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-beliv/w-beliv-1001/w-beliv-1001_Preview.mp4?token=v4kN7_ZczcC7CarhJ9s2gX88LaEgPkpOIuGiLWAvREA&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"CJ9FIt62O5o","session_youtube_ff_link":"https://youtu.be/CJ9FIt62O5o","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=1h6m53s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"The State of Reproducibility Stamps for Visualization Research Papers","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1004","abstract":"In the rapidly evolving field of information visualization, rigorous evaluation is essential for validating new techniques, understanding user interactions, and demonstrating the effectiveness of visualizations. The evaluation of visualization systems is fundamental to ensuring their effectiveness, usability, and impact. Faithful evaluations provide valuable insights into how users interact with and perceive the system, enabling designers to make informed decisions about design choices and improvements. 
However, an emerging trend of multiple evaluations within a single study raises critical questions about the sustainability, feasibility, and methodological rigor of such an approach. So, how many evaluations are enough? is a situational question and cannot be formulaically determined. Our objective is to summarize current trends and patterns to understand general practices across different contribution and evaluation types. New researchers and students, influenced by this trend, may believe-- multiple evaluations are necessary for a study. However, the number of evaluations in a study should depend on its contributions and merits, not on the trend of including multiple evaluations to strengthen a paper. In this position paper, we identify this trend through a non-exhaustive literature survey of TVCG papers from issue 1 in 2023 and 2024. We then discuss various evaluation strategy patterns in the information visualization field and how this paper will open avenues for further discussion.","accessible_pdf":true,"authors":[{"affiliations":["University of North Carolina at Chapel Hill, Chapel Hill, United States"],"email":"flin@unc.edu","is_corresponding":false,"name":"Feng Lin"},{"affiliations":["University of North Carolina-Chapel Hill, Chapel Hill, United States"],"email":"zeyuwang@cs.unc.edu","is_corresponding":true,"name":"Arran Zeyu Wang"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"dilshadur@sci.utah.edu","is_corresponding":false,"name":"Md Dilshadur Rahman"},{"affiliations":["University of North Carolina-Chapel Hill, Chapel Hill, United States"],"email":"danielle.szafir@cs.unc.edu","is_corresponding":false,"name":"Danielle Albers Szafir"},{"affiliations":["University of Oklahoma, Norman, United States"],"email":"quadri@ou.edu","is_corresponding":false,"name":"Ghulam Jilani Quadri"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1004","image_caption":"(Left) Distribution of four evaluation methods (quantitative, qualitative, case study, and mixed methods) across 214 papers, showing whether each type was not utilized, used once, or used multiple times within single study. (Middle) Venn diagram showing the overlap of papers using quantitative, qualitative, and case study evaluations. (Right) Grouped bar chart of the proportion of five paper categories (experimental, survey, system, application, and technique), illustrating the distribution of evaluation methods used in each category. 
Quantitative and case studies are common in technique papers, while experimental papers often use both quantitative and qualitative methods.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://www.arxiv.org/abs/2408.16080","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/iXPK-CWIysQ&t=2h2m49s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=2h2m49s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"Striking the Right Balance: Systematic Assessment of Evaluation Method Distribution Across Contribution Types","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1005","abstract":"Various standardized tests exist that assess individuals' visualization literacy. Their use can help to draw conclusions from studies. However, it is not taken into account that the test itself can create a pressure situation where participants might fear being exposed and assessed negatively. This is especially problematic when testing domain experts in design studies. We conducted interviews with experts from different domains performing the Mini-VLAT test for visualization literacy to identify potential problems. Our participants reported that the time limit per question, ambiguities in the questions and visualizations, and missing steps in the test procedure mainly had an impact on their performance and content. We discuss possible changes to the test design to address these issues and how such assessment methods could be integrated into existing evaluation procedures.","accessible_pdf":false,"authors":[{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"seyda.oeney@visus.uni-stuttgart.de","is_corresponding":true,"name":"Seyda \u00d6ney"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"moataz.abdelaal@visus.uni-stuttgart.de","is_corresponding":false,"name":"Moataz Abdelaal"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"kuno.kurzhals@visus.uni-stuttgart.de","is_corresponding":false,"name":"Kuno Kurzhals"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"paul.betz@sowi.uni-stuttgart.de","is_corresponding":false,"name":"Paul Betz"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"cordula.kropp@sowi.uni-stuttgart.de","is_corresponding":false,"name":"Cordula Kropp"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"weiskopf@visus.uni-stuttgart.de","is_corresponding":false,"name":"Daniel Weiskopf"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1005","image_caption":"Domain experts may be asked to take the Mini-VLAT test to assess their visualization skills. 
However, factors such as the time limit on each question could cause stress, potentially affecting their performance.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2409.08101","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/iXPK-CWIysQ&t=1h23m38s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=1h23m38s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"Testing the Test: Observations When Assessing Visualization Literacy of Domain Experts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1007","abstract":"In visualization, the process of transforming raw data into visually comprehensible representations is pivotal. While existing models like the Information Visualization Reference Model describe the data-to-visual mapping process, they often overlook a crucial intermediary step: design-specific transformations. This process, occurring after data transformation but before visual-data mapping, further derives data, such as groupings, layout, and statistics, that are essential to properly render the visualization. In this paper, we advocate for a deeper exploration of design-specific transformations, highlighting their importance in understanding visualization properties, particularly in relation to user tasks. We incorporate design-specific transformations into the Information Visualization Reference Model and propose a new formalism that encompasses the user task as a function over data. The resulting formalism offers three key benefits over existing visualization models: (1) describing tasks as compositions of functions, (2) enabling analysis of data transformations for visual-data mapping, and (3) empowering reasoning about visualization correctness and effectiveness. We further discuss the potential implications of this model on visualization theory and visualization experiment design.","accessible_pdf":false,"authors":[{"affiliations":["Columbia University, New York City, United States"],"email":"ewu@cs.columbia.edu","is_corresponding":true,"name":"eugene Wu"},{"affiliations":["Tufts University, Medford, United States"],"email":"remco@cs.tufts.edu","is_corresponding":false,"name":"Remco Chang"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1007","image_caption":"We propose to extend the Infovis Reference Model to explicitly model the role of design-specific data transformations in visualization design. This model decomposes visual mappings into design-specific transformations (e.g., stacking, quantization, calculating statistics) and a visual encoding. 
We further propose to model tasks as functions over the input data that the user wishes to estimate using the visualization. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/iXPK-CWIysQ&t=0h25m34s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=0h25m34s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"Design-Specific Transforms In Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1009","abstract":"The cognitive processes involved in understanding and misunderstanding visualizations have not yet been fully clarified, even for well-studied designs, such as bar charts. In particular, little is known about whether viewers can improve their learning processes by getting better insight into their own cognition. This paper describes a simple method to measure the role of such metacognitive understanding when learning to read bar charts. For this purpose, we conducted an experiment in which we investigated bar chart learning repeatedly, and tested how learning over trials was effected by metacognitive understanding. We integrate the findings into a model of metacognitive processing of visualizations, and discuss implications for the design of visualizations.","accessible_pdf":false,"authors":[{"affiliations":["Heidelberg University, Heidelberg, Germany"],"email":"antonia.schlieder@t-online.de","is_corresponding":true,"name":"Antonia Schlieder"},{"affiliations":["Heidelberg University, Heidelberg, Germany"],"email":"jan.rummel@psychologie.uni-heidelberg.de","is_corresponding":false,"name":"Jan Rummel"},{"affiliations":["Ruprecht-Karls-Universit\u00e4t Heidelberg, Heidelberg, Germany"],"email":"palbers@mathi.uni-heidelberg.de","is_corresponding":false,"name":"Peter Albers"},{"affiliations":["Heidelberg University, Heidelberg, Germany"],"email":"sadlo@uni-heidelberg.de","is_corresponding":false,"name":"Filip Sadlo"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1009","image_caption":"Metacognition is a feature of the cognitive system to monitor and control its cognitive processes. Consequently, one can describe metacognition as the human ability to reflect, to think about thinking, and to adapt our thinking when we deem it necessary. Truncating the y-axis of a bar chart can make the visualization deceptive in terms of certain visual reasoning tasks. 
In an experiment, we show that metacognitive processes are involved in understanding deceptive bar charts, i.e., that reasoners who are able to reflect on and adjust their strategies can improve their performance even without feedback on the correctness of their answers.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://vcg.iwr.uni-heidelberg.de/publications/pubdetails/Schlieder2024metacognition/","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/iXPK-CWIysQ&t=0h2m48s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=0h2m48s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"The Role of Metacognition in Understanding Deceptive Bar Charts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1021","abstract":"The replication crisis has spawned a revolution in scientific methods, aimed at increasing the transparency, robustness, and reliability of scientific outcomes. In particular, the practice of preregistering study designs has shown important advantages. Preregistration can help limit questionable research practices, as well as increase the success rate of study replications. Many fields have now adopted preregistration as a default expectation for published studies. In 2022, we set up a panel ``Merits and Limits of User Study Preregistration'' with the overall goal of explaining the concept of preregistration to a wide VIS audience and discussing its suitability for visualization research. 
We report on the arguments and discussion of this panel in the hope that it can benefit the visualization community at large.All materials and a copy of this paper are available on our OSF repository at https://osf.io/wes57/.","accessible_pdf":false,"authors":[{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"lonni.besancon@gmail.com","is_corresponding":true,"name":"Lonni Besan\u00e7on"},{"affiliations":["University of Virginia, Charlottesville, United States"],"email":"nosek@virginia.edu","is_corresponding":false,"name":"Brian Nosek"},{"affiliations":["Tilburg University, Tilburg, Netherlands"],"email":"t.l.haven@tilburguniversity.edu","is_corresponding":false,"name":"Tamarinde Haven"},{"affiliations":["Link\u00f6ping University, N\u00f6rrkoping, Sweden"],"email":"miriah.meyer@liu.se","is_corresponding":false,"name":"Miriah Meyer"},{"affiliations":["Northeastern University, Boston, United States"],"email":"c.dunne@northeastern.edu","is_corresponding":false,"name":"Cody Dunne"},{"affiliations":["Luxembourg Institute of Science and Technology, Belvaux, Luxembourg"],"email":"mohammad.ghoniem@gmail.com","is_corresponding":false,"name":"Mohammad Ghoniem"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1021","image_caption":"In this position paper, we summarize the 2022 panel's discussions and arguments for the wider visualization and human-computer interaction community, point to useful resources, and discuss implications along with any needed community-driven efforts. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://osf.io/preprints/osf/n7ej3","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/iXPK-CWIysQ&t=0h58m13s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=0h58m13s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"Merits and Limits of Preregistration for Visualization Research","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1026","abstract":"Despite 30+ years of academic practice, visualization still lacks an explanation of how and why it functions in complex organizations performing knowledge work. This survey examines the intersection of organizational studies and visualization design, highlighting the concept of \\textit{boundary objects}, which visualization practitioners are adopting in both CSCW (computer-supported collaborative work) and HCI. This paper also collects the prior literature on boundary objects in visualization design studies, a methodology which maps closely to action research in organizations, and addresses the same problems of `knowing in common'. 
Process artifacts generated by visualization design studies function as boundary objects in their own right, facilitating knowledge transfer across disciplines within an organization. Currently, visualization faces the challenge of explaining how sense-making functions across domains, through visualization artifacts, and how these support decision-making. As a deeply interdisciplinary field, visualization should adopt the theory of boundary objects in order to embrace its plurality of domains and systems, whilst empowering its practitioners with a unified process-based theory.","accessible_pdf":false,"authors":[{"affiliations":["UC Santa Cruz, Santa Cruz, United States"],"email":"jtotto@ucsc.edu","is_corresponding":false,"name":"Jasmine Tan Otto"},{"affiliations":["California Institute of Technology, Pasadena, United States"],"email":"sd@scottdavidoff.com","is_corresponding":false,"name":"Scott Davidoff"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1026","image_caption":"A `transit network' map of knowledge transfer in complex organizations. Each station represents a stakeholder group. Each line represents a single vertical, pipeline, or other system along which visualization artifacts (and other data products) may flow, acting as vehicles for organizational knowledge. In this example, the Relay, Robotics, and Science Mission groups each include various domain experts and decision-makers; the HCI vertical includes both visualization practitioners (Design and Visualization) and their close-collaborator domain experts (Staffing and Allocation). In this analogy, the task of visualization theory is not just to provide artifacts which serve as `vehicles for knowledge', nor only to identify systems through which knowledge flows, but also to discover processes which explain who shares knowledge, where it needs to go, and why it is (not) getting there. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://osf.io/preprints/osf/9f5ub","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/iXPK-CWIysQ&t=0h48m49s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=0h48m49s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"Visualization Artifacts are Boundary Objects","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1027","abstract":"Foundation models for vision and language are the basis of AI applications across numerous sectors of society. The success of these models stems from their ability to mimic human capabilities, namely visual perception in vision models, and analytical reasoning in large language models. 
As visual perception and analysis are fundamental to data visualization, in this position paper we ask: how can we harness foundation models to advance progress in visualization design? Specifically, how can multimodal foundation models (MFMs) guide visualization design through visual perception? We approach these questions by investigating the effectiveness of MFMs for perceiving visualization, and formalizing the overall visualization design and optimization space. Specifically, we think that MFMs can best be viewed as judges, equipped with the ability to criticize visualizations, and provide us with actions on how to improve a visualization. We provide a deeper characterization for text-to-image generative models, and multi-modal large language models, organized by what these models provide as output, and how to utilize the output for guiding design decisions. We hope that our perspective can inspire researchers in visualization on how to approach MFMs for visualization design.","accessible_pdf":false,"authors":[{"affiliations":["Vanderbilt University, Nashville, United States"],"email":"matthew.berger@vanderbilt.edu","is_corresponding":true,"name":"Matthew Berger"},{"affiliations":["Lawrence Livermore National Laboratory , Livermore, United States"],"email":"shusenl@sci.utah.edu","is_corresponding":false,"name":"Shusen Liu"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1027","image_caption":"We characterize the use of multimodal foundation models for guiding visualization design.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/iXPK-CWIysQ&t=0h14m41s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=0h14m41s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"[position paper] The Visualization JUDGE : Can Multimodal Foundation Models Guide Visualization Design Through Visual Perception?","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1033","abstract":"Submissions of original research that use Large Language Models (LLMs) or that study their behavior, suddenly account for a sizable portion of works submitted and accepted to visualization (VIS) conferences and similar venues in human-computer interaction (HCI).In this brief position paper, I argue that reviewers are relatively unprepared to evaluate these submissions effectively. To support this conjecture I reflect on my experience serving on four program committees forVIS and HCI conferences over the past year. I will describe common reviewer critiques that I observed and highlight how these critiques influence the review process. 
I also raise some concerns about these critiques that could limit applied LLM research to all but the best-resourced labs. While I conclude with suggestions for evaluating research contributions that incorporate LLMs, the ultimate goal of this position paper is to simulate a discussion on the review process and its challenges.","accessible_pdf":false,"authors":[{"affiliations":["Tableau Research, Seattle, United States"],"email":"amcrisan@uwaterloo.ca","is_corresponding":true,"name":"Anamaria Crisan"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-beliv-1033","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/iXPK-CWIysQ&t=1h37m33s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=1h37m33s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"We Don't Know How to Assess LLM Contributions in VIS/HCI","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1034","abstract":"This paper revisits the role of quantitative and qualitative methods in visualization research in the context of advancements in artificial intelligence (AI). The focus is on how we can bridge between the different methods in an integrated process of analyzing user study data. To this end, a process model of - potentially iterated - semantic enrichment of data is proposed. This joint perspective of data and semantics facilitates the integration of quantitative and qualitative methods. The model is motivated by examples of prior work, especially in the area of eye tracking user studies and coding data-rich observations. Finally, there is a discussion of open issues and research opportunities in the interplay between AI and qualitative and quantitative methods for visualization research.","accessible_pdf":false,"authors":[{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"weiskopf@visus.uni-stuttgart.de","is_corresponding":true,"name":"Daniel Weiskopf"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1034","image_caption":"Illustration of the approach that helps bridge quantitative and qualitative methods for visualization research. The schematic process comprises the research question, study design and execution, and iterative analysis of (possibly multimodal) study data. The key part is the analysis loop that keeps on transforming and enriching data with additional semantics to derive new data representations. 
Through the process, information is obtained at higher and higher levels of understanding. The analysis loop may consist of AI-based processing, user intervention, or a combination thereof.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2409.07250","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/iXPK-CWIysQ&t=1h52m33s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=1h52m33s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"Bridging Quantitative and Qualitative Methods for Visualization Research: A Data/Semantics Perspective in the Light of Advanced AI","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-beliv-1035","abstract":"Complexity is often seen as a inherent negative in information design, with the job of the designer being to reduce or eliminate complexity, and with principles like Tufte\u2019s \u201cdata-ink ratio\u201d or \u201cchartjunk\u201d to operationalize minimalism and simplicity in visualizations. However, in this position paper, we call for a more expansive view of complexity as a design material, like color or texture or shape: an element of information design that can be used in many ways, many of which are beneficial to the goals of using data to understand the world around us. We describe complexity as a phenomenon that occurs not just in visual design but in every aspect of the sensemaking process, from data collection to interpretation. For each of these stages, we present examples of ways that these various forms of complexity can be used (or abused) in visualization design. 
We ultimately call on the visualization community to build a more nuanced view of complexity, to look for places to usefully integrate complexity in multiple stages of the design process, and, even when the goal is to reduce complexity, to look for the non-visual forms of complexity that may have otherwise been overlooked.","accessible_pdf":true,"authors":[{"affiliations":["University for Continuing Education Krems, Krems, Austria"],"email":"florian.windhager@donau-uni.ac.at","is_corresponding":false,"name":"Florian Windhager"},{"affiliations":["King's College London, London, United Kingdom"],"email":"alfie.abdulrahman@gmail.com","is_corresponding":false,"name":"Alfie Abdul-Rahman"},{"affiliations":["University of Applied Sciences Potsdam, Potsdam, Germany"],"email":"mark-jan.bludau@fh-potsdam.de","is_corresponding":false,"name":"Mark-Jan Bludau"},{"affiliations":["Warwick Institute for the Science of Cities, Coventry, United Kingdom"],"email":"nicole.hengesbach@posteo.de","is_corresponding":false,"name":"Nicole Hengesbach"},{"affiliations":["University of Amsterdam, Amsterdam, Netherlands"],"email":"h.lamqaddam@uva.nl","is_corresponding":false,"name":"Houda Lamqaddam"},{"affiliations":["OCAD University, Toronto, Canada"],"email":"meirelles.isabel@gmail.com","is_corresponding":false,"name":"Isabel Meirelles"},{"affiliations":["TU Eindhoven, Eindhoven, Netherlands"],"email":"b.speckmann@tue.nl","is_corresponding":false,"name":"Bettina Speckmann"},{"affiliations":["Northeastern University, Portland, United States"],"email":"m.correll@northeastern.edu","is_corresponding":true,"name":"Michael Correll"}],"award":"","doi":"","event_id":"w-beliv","event_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-beliv-1035","image_caption":"Axes of complexity and complexity transformation in visualization design, bridging from project initiation complexity to the complexity of interpretation and communication activities, using the metaphor of a mixing board. A designer might strategically employ higher or lower levels of complexity across these axes to achieve a desired effect. Likewise, changes to one type of complexity shift complexity to other parts of the pipeline.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2409.07465","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/iXPK-CWIysQ&t=0h38m12s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"associated6b","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)","session_uid":"w-beliv","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/iXPK-CWIysQ&t=0h38m12s","sessions":["BELIV: evaluation and BEyond - methodoLogIcal approaches for Visualization (Sesssion 2)"],"time_stamp":"2024-10-14T16:00:00Z","title":"Complexity as Design Material","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1090","abstract":"Visualizing high dimensional data is challenging, since any dimensionality reduction technique will distort distances. 
A classic method in cartography\u2013Tissot\u2019s Indicatrix, specific to sphere-to-plane maps\u2013 visualizes distortion using ellipses. Inspired by this idea, we describe the hypertrix: a method for representing distortions that occur when data is projected from arbitrarily high dimensions onto a 2D plane. We demonstrate our technique through synthetic and real-world datasets, and describe how this indicatrix can guide interpretations of nonlinear dimensionality reduction.","accessible_pdf":true,"authors":[{"affiliations":["Harvard University, Boston, United States"],"email":"sraval@g.harvard.edu","is_corresponding":true,"name":"Shivam Raval"},{"affiliations":["Harvard University, Cambridge, United States","Google Research, Cambridge, United States"],"email":"viegas@google.com","is_corresponding":false,"name":"Fernanda Viegas"},{"affiliations":["Harvard University, Cambridge, United States","Google Research, Cambridge, United States"],"email":"wattenberg@gmail.com","is_corresponding":false,"name":"Martin Wattenberg"}],"award":"best","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1090","image_caption":"Hypertrix is an indicatrix for visualizing distortions in high-dimensional data projections. It is an overlay of colored elliptical glyphs on data projections, revealing both the magnitude and direction of local distortions. The hypertrix for a t-SNE projection of the MNIST dataset reveals the compactness of the digit '1' cluster with respect to other clusters.","keywords":["Dimensionality Reduction, High-dimensional data\u2014Distortion\u2014Text Visualization, Clustering"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/WZR6DttAYvo&t=1h2m3s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1090/v-short-1090_Preview.mp4?token=8OKGBT9QpGEiRm869wnviDwZ80_4q4AydJZJrbQVf2k&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1090/v-short-1090_Preview.srt?token=Uaksc-EWIVtQugoqjEhepEOzSd5w8KCNpxZ2dMHiapk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"awards1","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"VGTC Awards & Best Short Papers","session_uid":"v-short","session_youtube_ff_id":"4S9S0DlrE14","session_youtube_ff_link":"https://youtu.be/4S9S0DlrE14","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/WZR6DttAYvo&t=1h2m3s","sessions":["VGTC Awards & Best Short Papers"],"time_stamp":"2024-10-15T15:10:00Z","title":"Hypertrix: An indicatrix for high-dimensional visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1150","abstract":"Exploratory visual data analysis tools empower data analysts to efficiently and intuitively explore data insights throughout the entire analysis cycle. However, the gap between common programmatic analysis (e.g., within computational notebooks) and exploratory visual analysis leads to a disjointed and inefficient data analysis experience. To bridge this gap, we developed PyGWalker, a Python library that offers on-the-fly assistance for exploratory visual data analysis. 
It features a lightweight and intuitive GUI with a shelf builder modality. Its loosely coupled architecture supports multiple computational environments to accommodate varying data sizes. Since its release in February 2023, PyGWalker has gained much attention, with 612k downloads on PyPI and over 10.5k stars on GitHub as of June 2024. This demonstrates its value to the data science and visualization community, with researchers and developers integrating it into their own applications and studies.","accessible_pdf":false,"authors":[{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China","Kanaries Data Inc., Hangzhou, China"],"email":"yue.yu@connect.ust.hk","is_corresponding":true,"name":"Yue Yu"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"lshenaj@connect.ust.hk","is_corresponding":false,"name":"Leixian Shen"},{"affiliations":["Kanaries Data Inc., Hangzhou, China"],"email":"feilong@kanaries.net","is_corresponding":false,"name":"Fei Long"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"huamin@cse.ust.hk","is_corresponding":false,"name":"Huamin Qu"},{"affiliations":["Kanaries Data Inc., Hangzhou, China"],"email":"haochen@kanaries.net","is_corresponding":false,"name":"Hao Chen"}],"award":"best","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1150","image_caption":"The image shows the interface of PyGWalker integrated into a Jupyter Notebook. PyGWalker is invoked with a single line of code, allowing users to seamlessly explore and visualize data using drag-and-drop functionality. Its user-friendly interface supports flexible data transformation and interactive visualization, making it popular among the data science community with over 612k downloads through PyPI and 10.8k stars on GitHub.","keywords":["Data Visualization; Exploratory Data Analysis; Computational Notebooks"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2406.11637","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/WZR6DttAYvo&t=1h15m57s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1150/v-short-1150_Preview.mp4?token=flp8n1Z0vXAqQ_oEIRqfsm2QSpr9E6p9FOk9Al4a2YQ&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1150/v-short-1150_Preview.srt?token=f_y8rHX2MObQ6oElWkMOBTqgjzqz_sECigGOgg1TWf8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"awards1","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"VGTC Awards & Best Short Papers","session_uid":"v-short","session_youtube_ff_id":"snDdcF8cbO4","session_youtube_ff_link":"https://youtu.be/snDdcF8cbO4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/WZR6DttAYvo&t=1h15m57s","sessions":["VGTC Awards & Best Short Papers"],"time_stamp":"2024-10-15T15:21:00Z","title":"PyGWalker: On-the-fly Assistant for Exploratory Visual Data Analysis","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1077","abstract":"A growing body of work draws on feminist thinking to challenge assumptions about how people engage with and use visualizations. 
This work draws on feminist values, driving design and research guidelines that account for the influences of power and neglect. This prior work is largely prescriptive, however, forgoing articulation of how feminist theories of knowledge \u2014 or feminist epistemology \u2014 can alter research design and outcomes. At the core of our work is an engagement with feminist epistemology, drawing attention to how a new framework for how we know what we know enabled us to overcome intellectual tensions in our research. Specifically, we focus on the theoretical concept of entanglement, central to recent feminist scholarship, and contribute: a history of entanglement in the broader scope of feminist theory; an articulation of the main points of entanglement theory for a visualization context; and a case study of research outcomes as evidence of the potential of feminist epistemology to impact visualization research. This work answers a call in the community to embrace a broader set of theoretical and epistemic foundations and provides a starting point for bringing feminist theories into visualization research.","accessible_pdf":true,"authors":[{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"derya.akbaba@liu.se","is_corresponding":true,"name":"Derya Akbaba"},{"affiliations":["Emory University, Atlanta, United States"],"email":"lauren.klein@emory.edu","is_corresponding":false,"name":"Lauren Klein"},{"affiliations":["Link\u00f6ping University, N\u00f6rrkoping, Sweden"],"email":"miriah.meyer@liu.se","is_corresponding":false,"name":"Miriah Meyer"}],"award":"best","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1077","image_caption":"A series of overlapping circles that are made up of four concentric circles. The inner circle is labeled the knowledge artifact, then entanglements with phenomenon, then entanglements with apparatus, then entanglements. 
These concentric circles overlap in a wave of entanglements and cover topics listed as: data, vis, insight, power, conventions, technology, history, processes, materiality, people, society, design, labor, politics, ethics, places.","keywords":["Epistemology, feminism, entanglement, theory"],"open_access_supplemental_link":"https://osf.io/ubrdy/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/rw35g","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/d-eG7NRcrKg&t=0h2m15s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1077/v-full-1077_Preview.mp4?token=6NSXfqcdZIqgeto12eTKiolmd1ailFSS5Ylrv9WGhQA&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"awards2","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Best Full Papers","session_uid":"v-full","session_youtube_ff_id":"x-XyV4J73t4","session_youtube_ff_link":"https://youtu.be/x-XyV4J73t4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/d-eG7NRcrKg&t=0h2m15s","sessions":["Best Full Papers"],"time_stamp":"2024-10-15T16:10:00Z","title":"Entanglements for Visualization: Changing Research Outcomes through Feminist Theory","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1232","abstract":"How do cancer cells grow, divide, proliferate, and die? How do drugs influence these processes? These are difficult questions that we can attempt to answer with a combination of time-series microscopy experiments, classification algorithms, and data visualization.However, collecting this type of data and applying algorithms to segment and track cells and construct lineages of proliferation is error-prone; and identifying the errors can be challenging since it often requires cross-checking multiple data types. Similarly, analyzing and communicating the results necessitates synthesizing different data types into a single narrative. State-of-the-art visualization methods for such data use independent line charts, tree diagrams, and images in separate views. However, this spatial separation requires the viewer of these charts to combine the relevant pieces of data in memory. To simplify this challenging task, we describe design principles for weaving cell images, time-series data, and tree data into a cohesive visualization. Our design principles are based on choosing a primary data type that drives the layout and integrates the other data types into that layout. We then introduce Aardvark, a system that uses these principles to implement novel visualization techniques. Based on Aardvark, we demonstrate the utility of each of these approaches for discovery, communication, and data debugging in a series of case studies. 
","accessible_pdf":true,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"devin@sci.utah.edu","is_corresponding":true,"name":"Devin Lange"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"robert.judson-torres@hci.utah.edu","is_corresponding":false,"name":"Robert L Judson-Torres"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"tzangle@chemeng.utah.edu","is_corresponding":false,"name":"Thomas A Zangle"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"alex@sci.utah.edu","is_corresponding":false,"name":"Alexander Lex"}],"award":"best","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1232","image_caption":"Live-cell microscopy imaging results in multimodal data composed of trees, time-series, and images. The visualization system Aardvark combines these data modalities into composite visualizations. The tree-first visualization (left) shows the cell relationships as a node-link tree visualization, horizon charts show the time series data and image snippets display alongside the horizon charts. The time-series-first visualization (top right) shows the time-series data as line charts with images and cell relationships superimposed. Finally, the image-first visualization (bottom right) shows the full microscopy image, with cell movement and relationships superimposed.","keywords":["Visualization, Cell Microscopy, View Composition"],"open_access_supplemental_link":"https://osf.io/3f6kr/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/cdbm6","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/d-eG7NRcrKg&t=0h19m12s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1232/v-full-1232_Preview.mp4?token=LCnEMjUCrDIXrJNKZSfQPb2d5F8T_eckC8Qqrdxmxa0&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"awards2","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Best Full Papers","session_uid":"v-full","session_youtube_ff_id":"5kVue1ySnOk","session_youtube_ff_link":"https://youtu.be/5kVue1ySnOk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/d-eG7NRcrKg&t=0h19m12s","sessions":["Best Full Papers"],"time_stamp":"2024-10-15T16:25:00Z","title":"Aardvark: Composite Visualizations of Trees, Time-Series, and Images","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1332","abstract":"Translating natural language to visualization (NL2VIS) has shown great promise for visual data analysis, but it remains a challenging task that requires multiple low-level implementations, such as natural language processing and visualization design. Recent advancements in pre-trained large language models (LLMs) are opening new avenues for generating visualizations from natural language. However, the lack of a comprehensive and reliable benchmark hinders our understanding of LLMs\u2019 capabilities in visualization generation. In this paper, we address this gap by proposing a new NL2VIS benchmark called VisEval. Firstly, we introduce a high-quality and large-scale dataset. 
This dataset includes 2,524 representative queries covering 146 databases, paired with accurately labeled ground truths. Secondly, we advocate for a comprehensive automated evaluation methodology covering multiple dimensions, including validity, legality, and readability. By systematically scanning for potential issues with a number of heterogeneous checkers, VisEval provides reliable and trustworthy evaluation outcomes. We run VisEval on a series of state-of-the-art LLMs. Our evaluation reveals prevalent challenges and delivers essential insights for future advancements.","accessible_pdf":false,"authors":[{"affiliations":["Microsoft Research, Shanghai, China"],"email":"christy05.chen@gmail.com","is_corresponding":true,"name":"Nan Chen"},{"affiliations":["Microsoft Research, Shanghai, China"],"email":"scottyugochang@gmail.com","is_corresponding":false,"name":"Yuge Zhang"},{"affiliations":["Microsoft Research, Shanghai, China"],"email":"jiahangxu@microsoft.com","is_corresponding":false,"name":"Jiahang Xu"},{"affiliations":["ShanghaiTech University, Shanghai, China"],"email":"rk.ren@outlook.com","is_corresponding":false,"name":"Kan Ren"},{"affiliations":["Microsoft Research, Shanghai, China"],"email":"yuqyang@microsoft.com","is_corresponding":false,"name":"Yuqing Yang"}],"award":"best","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1332","image_caption":"Examples of visualization issues detected by VisEval: Llama (CodeLlama-7B) produces code that cannot be executed, while Gemini (Gemini-Pro) incorrectly maps the \"sum of Tonnage\" to the y-axis instead of \"count\" and lacks a legend for the \"Cargo ship\" color. GPT-3.5 fails to sort as specified and places the legend outside the canvas. Although GPT-4 almost meets the requirements, it still encounters overflow issues that impact readability.","keywords":["Visualization evaluation, automatic visualization, large language models, benchmark"],"open_access_supplemental_link":"https://github.com/microsoft/VisEval","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.00981","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/d-eG7NRcrKg&t=0h36m12s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1332/v-full-1332_Preview.mp4?token=tbykaWmlhAAS8qHK-3sM9HAod8Q6W5G5TKF9TC0sT64&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"awards2","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Best Full Papers","session_uid":"v-full","session_youtube_ff_id":"lKkg-pUufh8","session_youtube_ff_link":"https://youtu.be/lKkg-pUufh8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/d-eG7NRcrKg&t=0h36m12s","sessions":["Best Full Papers"],"time_stamp":"2024-10-15T16:40:00Z","title":"VisEval: A Benchmark for Data Visualization in the Era of Large Language Models","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1802","abstract":"In the biomedical domain, visualizing the document embeddings of an extensive corpus has been widely used in information-seeking tasks. However, three key challenges with existing visualizations make it difficult for clinicians to find information efficiently. 
First, the document embeddings used in these visualizations are generated statically by pretrained language models, which cannot adapt to the user's evolving interest. Second, existing document visualization techniques cannot effectively display how the documents are relevant to users\u2019 interest, making it difficult for users to identify the most pertinent information. Third, existing embedding generation and visualization processes suffer from a lack of interpretability, making it difficult to understand, trust and use the result for decision-making. In this paper, we present a novel visual analytics pipeline for user-driven document representation and iterative information seeking (VADIS). VADIS introduces a prompt-based attention model (PAM) that generates dynamic document embedding and document relevance adjusted to the user's query. To effectively visualize these two pieces of information, we design a new document map that leverages a circular grid layout to display documents based on both their relevance to the query and the semantic similarity. Additionally, to improve the interpretability, we introduce a corpus-level attention visualization method to improve the user's understanding of the model focus and to enable the users to identify potential oversight. This visualization, in turn, empowers users to refine, update and introduce new queries, thereby facilitating a dynamic and iterative information-seeking experience. We evaluated VADIS quantitatively and qualitatively on a real-world dataset of biomedical research papers to demonstrate its effectiveness.","accessible_pdf":false,"authors":[{"affiliations":["Ohio State University, Columbus, United States"],"email":"qiu.580@buckeyemail.osu.edu","is_corresponding":true,"name":"Rui Qiu"},{"affiliations":["The Ohio State University, Columbus, United States"],"email":"tu.253@osu.edu","is_corresponding":false,"name":"Yamei Tu"},{"affiliations":["Washington University School of Medicine in St. Louis, St. Louis, United States"],"email":"yenp@wustl.edu","is_corresponding":false,"name":"Po-Yin Yen"},{"affiliations":["The Ohio State University , Columbus , United States"],"email":"hwshen@cse.ohio-state.edu","is_corresponding":false,"name":"Han-Wei Shen"}],"award":"best","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1802","image_caption":"Traditional document maps cluster documents based on static embeddings, leading to confusing grouping with inconsistent semantic concepts. We propose Prompt-based Attention Model (PAM) that generates prompt-specific document representations to better align with human interest. Recognizing that not all documents are equally relevant to a user\u2019s specific interest, we present Relevance-preserving mapping to project documents based on both their relevance to the user\u2019s interest, and their inter-similarity under user\u2019s interest. 
The mapping features a circular layout that centralizes the most pertinent documents, which aligns with both human\u2019s natural viewing pattern and the distribution of documents\u2019 relevance.","keywords":["Attention visualization, dynamic document representation, document visualization, biomedical information seeking"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/d-eG7NRcrKg&t=0h52m38s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1802/v-full-1802_Preview.mp4?token=LA7YlgNQGkiZLyHEnN17Ob983PPFPSfpWFr16hnzl8M&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1802/v-full-1802_Preview.srt?token=_kmNMLgzJ1-tuMdPqbZvT77tnne1hHmQvmqvnSs1RrA&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"awards2","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Best Full Papers","session_uid":"v-full","session_youtube_ff_id":"iafjQjWEHIY","session_youtube_ff_link":"https://youtu.be/iafjQjWEHIY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/d-eG7NRcrKg&t=0h52m38s","sessions":["Best Full Papers"],"time_stamp":"2024-10-15T16:55:00Z","title":"VADIS: A Visual Analytics Pipeline for Dynamic Document Representation and Information Seeking","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1880","abstract":"Merge trees are a valuable tool in the scientific visualization of scalar fields; however, current methods for merge tree comparisons are computationally expensive, primarily due to the exhaustive matching between tree nodes. To address this challenge, we introduce the Merge Tree Neural Network (MTNN), a learned neural network model designed for merge tree comparison. The MTNN enables rapid and high-quality similarity computation. We first demonstrate how to train graph neural networks, which emerged as effective encoders for graphs, in order to produce embeddings of merge trees in vector spaces for efficient similarity comparison. Next, we formulate the novel MTNN model that further improves the similarity comparisons by integrating the tree and node embeddings with a new topological attention mechanism. We demonstrate the effectiveness of our model on real-world data in different domains and examine our model\u2019s generalizability across various datasets. Our experimental analysis demonstrates our approach\u2019s superiority in accuracy and efficiency. 
In particular, we speed up the prior state-of-the-art by more than 100\u00d7 on the benchmark datasets while maintaining an error rate below 0.1%.","accessible_pdf":true,"authors":[{"affiliations":["Tulane University, New Orleans, United States"],"email":"yqin2@tulane.edu","is_corresponding":true,"name":"Yu Qin"},{"affiliations":["Montana State University, Bozeman, United States"],"email":"brittany.fasy@montana.edu","is_corresponding":false,"name":"Brittany Terese Fasy"},{"affiliations":["Tulane University, New Orleans, United States"],"email":"cwenk@tulane.edu","is_corresponding":false,"name":"Carola Wenk"},{"affiliations":["Tulane University, New Orleans, United States"],"email":"bsumma@tulane.edu","is_corresponding":false,"name":"Brian Summa"}],"award":"best","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1880","image_caption":"Merge tree comparisons are essential in scientific visualization but are often limited by the slow, computationally heavy process of matching tree nodes. Our Merge Tree Neural Network (MTNN) transforming merge tree comparison into a learning task. This innovation significantly reduces computation time by over 100 times, while maintaining near-perfect accuracy. MTNN stands out as a powerful tool for efficient and precise scientific visualization.","keywords":["computational topology, merge trees, graph neural networks"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2404.05879","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/d-eG7NRcrKg&t=1h9m8s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1880/v-full-1880_Preview.mp4?token=g3OGwgih9TpwR_wddxmFWU_U55zB38PzucsWWRcn5iY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1880/v-full-1880_Preview.srt?token=MbBPP-6mqwzqJUOgoWyt9bGZXNkqqkjKnD4AxqMvjJg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"awards2","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Best Full Papers","session_uid":"v-full","session_youtube_ff_id":"5x_3_xJ0xKc","session_youtube_ff_link":"https://youtu.be/5x_3_xJ0xKc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/d-eG7NRcrKg&t=1h9m8s","sessions":["Best Full Papers"],"time_stamp":"2024-10-15T17:10:00Z","title":"Rapid and Precise Topological Comparison with Merge Tree Neural Networks","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-10091124","abstract":"The Internet of Food (IoF) is an emerging field in smart foodsheds, involving the creation of a knowledge graph (KG) about the environment, agriculture, food, diet, and health. However, the heterogeneity and size of the KG present challenges for downstream tasks, such as information retrieval and interactive exploration. To address those challenges, we propose an interactive knowledge and learning environment (IKLE) that integrates three programming and modeling languages to support multiple downstream tasks in the analysis pipeline. To make IKLE easier to use, we have developed algorithms to automate the generation of each language. 
In addition, we collaborated with domain experts to design and develop a dataflow visualization system, which embeds the automatic language generations into components and allows users to build their analysis pipeline by dragging and connecting components of interest. We have demonstrated the effectiveness of IKLE through three real-world case studies in smart foodsheds.","accessible_pdf":true,"authors":[{"affiliations":"","email":"tu.253@osu.edu","is_corresponding":false,"name":"Yamei Tu"},{"affiliations":"","email":"wang.5502@osu.edu","is_corresponding":true,"name":"Xiaoqi Wang"},{"affiliations":"","email":"qiu.580@osu.edu","is_corresponding":false,"name":"Rui Qiu"},{"affiliations":"","email":"hwshen@cse.ohio-state.edu","is_corresponding":false,"name":"Han-Wei Shen"},{"affiliations":"","email":"mmmille6@wisc.edu","is_corresponding":false,"name":"Michelle Miller"},{"affiliations":"","email":"jinmeng.rao@wisc.edu","is_corresponding":false,"name":"Jinmeng Rao"},{"affiliations":"","email":"song.gao@wisc.edu","is_corresponding":false,"name":"Song Gao"},{"affiliations":"","email":"prhuber@ucdavis.edu","is_corresponding":false,"name":"Patrick R. Huber"},{"affiliations":"","email":"adhollander@ucdavis.edu","is_corresponding":false,"name":"Allan D. Hollander"},{"affiliations":"","email":"matthew@ic-foods.org","is_corresponding":false,"name":"Matthew Lange"},{"affiliations":"","email":"cgarcia@tacc.utexas.edu","is_corresponding":false,"name":"Christian R. Garcia"},{"affiliations":"","email":"jstubbs@tacc.utexas.edu","is_corresponding":false,"name":"Joe Stubbs"}],"award":"","doi":"10.1109/MCG.2023.3263960","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"10091124","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-10091124","image_caption":"(A) We propose an interactive knowledge and learning environment (IKLE) that integrates three programming and modeling languages to support multiple downstream tasks in the analysis pipeline. To make IKLE easier to use, we have developed algorithms to automate the generation of each language. In addition, we collaborated with domain experts to design and develop a dataflow visualization system, which embeds the automatic language generations into components and allows users to build their analysis pipeline by dragging and connecting components of interest. 
(B) the overview of our IKLE and its architecture.","keywords":["Learning Environment, Interactive Learning Environments, Programming Language, Visual System, Analysis Pipeline, Patterns In Data, Flow Data, Human-computer Interaction, Food Systems, Information Retrieval, Domain Experts, Language Model, Automatic Generation, Interactive Exploration, Cyberinfrastructure, Pre-trained Language Models, Resource Description Framework, SPARQL Query, DBpedia, Entity Types, Data Visualization, Resilience Analysis, Load Data, Query Results, Supply Chain, Network Flow"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://icicle.osu.edu/sites/default/files/2023-04/An_Interactive_Knowledge_and_Learning_Environment_in_Smart_Foodsheds.pdf","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/-4EH4hLDfyA&t=0h37m23s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10091124/v-cga-10091124_Preview.mp4?token=AohQXqEwVCpW2S-BqidwwXDKQKJWz4bjQz4U8-PS4dA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10091124/v-cga-10091124_Preview.srt?token=QpVRyv6MAkpWAPCkp9OJ9zeW0CdQNCxWZuUc7UdI1f8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga1","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Analytics and Applications","session_uid":"v-cga","session_youtube_ff_id":"g_5lfaP_5eQ","session_youtube_ff_link":"https://youtu.be/g_5lfaP_5eQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-4EH4hLDfyA&t=0h37m23s","sessions":["CG&A: Analytics and Applications"],"time_stamp":"2024-10-16T16:36:00Z","title":"An Interactive Knowledge and Learning Environment in Smart Foodsheds","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-10198358","abstract":"Set visualization facilitates the exploration and analysis of set-type data. However, how sets should be visualized when the data are uncertain is still an open research challenge. To address the problem of depicting uncertainty in set visualization, we ask 1) which aspects of set type data can be affected by uncertainty and 2) which characteristics of uncertainty influence the visualization design. We answer these research questions by first describing a conceptual framework that brings together 1) the information that is primarily relevant in sets (i.e., set membership, set attributes, and element attributes) and 2) different plausible categories of (un)certainty (i.e., certainty, undefined uncertainty as a binary fact, and defined uncertainty as quantifiable measure). Following the structure of our framework, we systematically discuss basic visualization examples of integrating uncertainty in set visualizations. 
We draw on existing knowledge about general uncertainty visualization and previous evidence of its effectiveness.","accessible_pdf":true,"authors":[{"affiliations":"","email":"christian.tominski@uni-rostock.de","is_corresponding":false,"name":"Christian Tominski"},{"affiliations":"","email":"m.behrisch@uu.nl","is_corresponding":true,"name":"Michael Behrisch"},{"affiliations":"","email":"susanne.bleisch@fhnw.ch","is_corresponding":false,"name":"Susanne Bleisch"},{"affiliations":"","email":"sara.fabrikant@geo.uzh.ch","is_corresponding":false,"name":"Sara Irina Fabrikant"},{"affiliations":"","email":"eva.mayr@donau-uni.ac.at","is_corresponding":false,"name":"Eva Mayr"},{"affiliations":"","email":"miksch@ifs.tuwien.ac.at","is_corresponding":false,"name":"Silvia Miksch"},{"affiliations":"","email":"helen.purchase@monash.edu","is_corresponding":false,"name":"Helen Purchase"}],"award":"","doi":"10.1109/MCG.2023.3300441","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"10198358","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-10198358","image_caption":"Visualizing uncertainty in set-type data is crucial for accurate analysis and decision-making. This work introduces a framework that categorizes data characteristics and types of uncertainty, providing strategies for integrating uncertainty into visualizations. By addressing set membership, set attributes, and element attributes, the framework helps design effective visual representations that communicate both data and its inherent uncertainties. This approach not only aids in understanding complex datasets but also enhances decision-making in various applications, from academic course planning to complex scenarios like ensemble forecasting and gene mapping.","keywords":["Uncertainty, Data Visualization, Measurement Uncertainty, Visual Analytics, Terminology, Task Analysis, Surveys, Conceptual Framework, Cardinality, Data Visualization, Visual Representation, Measure Of The Amount, Set Membership, Intersection Set, Visual Design, Different Types Of Uncertainty, Missing Values, Visual Methods, Fuzzy Set, Age Of Students, Color Values, Uncertainty Values, Explicit Representation, Aggregate Value, Exact Information, Uncertain Information, Table Cells, Temporal Uncertainty, Uncertain Data, Representation Of Uncertainty, Implicit Representation, Spatial Uncertainty, Point Symbol, Visual Clutter, Color Hue, Graphical Elements, Uncertain Value"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2302.11575","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/-4EH4hLDfyA&t=0h48m58s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10198358/v-cga-10198358_Preview.mp4?token=Mgq_Nnpy6uatRSqLhcqCj4lNl75aRjlj9BMREc1eD9Q&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10198358/v-cga-10198358_Preview.srt?token=W8A4RCOde5v9L6Pp5c4soF_xKoPB1ZvUhyZZpbB29kw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga1","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Analytics and 
Applications","session_uid":"v-cga","session_youtube_ff_id":"nFYQtRmiwzM","session_youtube_ff_link":"https://youtu.be/nFYQtRmiwzM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-4EH4hLDfyA&t=0h48m58s","sessions":["CG&A: Analytics and Applications"],"time_stamp":"2024-10-16T16:48:00Z","title":"Visualizing Uncertainty in Sets","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-10227838","abstract":"We report a study investigating the viability of using interactive visualizations to aid architectural design with building codes. While visualizations have been used to support general architectural design exploration, existing computational solutions treat building codes as separate from, rather than part of, the design process, creating challenges for architects. Through a series of participatory design studies with professional architects, we found that interactive visualizations have promising potential to aid design exploration and sensemaking in early stages of architectural design by providing feedback about potential allowances and consequences of design decisions. However, implementing a visualization system necessitates addressing the complexity and ambiguity inherent in building codes. To tackle these challenges, we propose various user-driven knowledge management mechanisms for integrating, negotiating, interpreting, and documenting building code rules.","accessible_pdf":false,"authors":[{"affiliations":"","email":"snowak@sfu.ca","is_corresponding":true,"name":"Stan Nowak"},{"affiliations":"","email":"bon.aseniero@autodesk.com","is_corresponding":false,"name":"Bon Adriel Aseniero"},{"affiliations":"","email":"lyn@sfu.ca","is_corresponding":false,"name":"Lyn Bartram"},{"affiliations":"","email":"tovi@dgp.toronto.edu","is_corresponding":false,"name":"Tovi Grossman"},{"affiliations":"","email":"George.fitzmaurice@autodesk.com","is_corresponding":false,"name":"George Fitzmaurice"},{"affiliations":"","email":"justin.matejka@autodesk.com","is_corresponding":false,"name":"Justin Matejka"}],"award":"","doi":"10.1109/MCG.2023.3307971","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"10227838","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-10227838","image_caption":"Design probes exploring information visualization and broader interactive systems solutions to help architects design with building codes.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/-4EH4hLDfyA&t=1h2m1s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10227838/v-cga-10227838_Preview.mp4?token=kj-oA3y_pasQYoWCZtyXzfvynLKMBJFDKneU9PbgfLk&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10227838/v-cga-10227838_Preview.srt?token=LMmZ-ChkH0tezmvWvNWVuorm1os61SHGICy2iBqjtp8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga1","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Analytics and 
Applications","session_uid":"v-cga","session_youtube_ff_id":"uquxa5bjs8I","session_youtube_ff_link":"https://youtu.be/uquxa5bjs8I","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-4EH4hLDfyA&t=1h2m1s","sessions":["CG&A: Analytics and Applications"],"time_stamp":"2024-10-16T17:00:00Z","title":"Identifying Visualization Opportunities to Help Architects Manage the Complexity of Building Codes","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-9612019","abstract":"The number of online news articles available nowadays is rapidly increasing. When exploring articles on online news portals, navigation is mostly limited to the most recent ones. The spatial context and the history of topics are not immediately accessible. To support readers in the exploration or research of articles in large datasets, we developed an interactive 3D globe visualization. We worked with datasets from multiple online news portals containing up to 45,000 articles. Using agglomerative hierarchical clustering, we represent the referenced locations of news articles on a globe with different levels of detail. We employ two interaction schemes for navigating the viewpoint on the visualization, including support for hand-held devices and desktop PCs, and provide search functionality and interactive filtering. Based on this framework, we explore additional modules for jointly exploring the spatial and temporal domain of the dataset and incorporating live news into the visualization.","accessible_pdf":false,"authors":[{"affiliations":"","email":"nicholas.ingulfsen@gmail.com","is_corresponding":false,"name":"Nicholas Ingulfsen"},{"affiliations":"","email":"simone.schaub@visinf.tu-darmstadt.de","is_corresponding":false,"name":"Simone Schaub-Meyer"},{"affiliations":"","email":"grossm@inf.ethz.ch","is_corresponding":false,"name":"Markus Gross"},{"affiliations":"","email":"tobias.guenther@fau.de","is_corresponding":true,"name":"Tobias G\u00fcnther"}],"award":"","doi":"10.1109/MCG.2021.3127434","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"9612019","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-9612019","image_caption":"Most news websites provide access to only the most recent articles and offer no support to explore the temporal evolution of news. Further, many articles contain the names of places, which would allow to geolocalize and cluster news. With news globe, we provide a visualization system that gives readers the means to explore both the spatial and temporal dimension in a georeferenced context. 
","keywords":["News Articles, Number Of Articles, Headlines, Interactive Visualization, Online News, Agglomerative Clustering, Local News, Interactive Exploration, Desktop PC, Different Levels Of Detail, News Portals, Spatial Information, User Study, 3D Space, Human-computer Interaction, Temporal Information, Third Dimension, Tablet Computer, Pie Chart, News Stories, 3D Visualization, Article Details, Visual Point, Bottom Of The Screen, Geospatial Data, Type Of Visualization, Largest Dataset, Tagging Location, Live Feed"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/-4EH4hLDfyA&t=0h9m58s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-9612019/v-cga-9612019_Preview.mp4?token=GkTT3Eg_EbilgJj-ejZQBPP0XWewvUFObcmJeT41RPs&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-9612019/v-cga-9612019_Preview.srt?token=SbBNukZ73Zn3zPtlouAYThhFOuvs58j8-yjM0sxCOzY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga1","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Analytics and Applications","session_uid":"v-cga","session_youtube_ff_id":"lL3SWpaLWQs","session_youtube_ff_link":"https://youtu.be/lL3SWpaLWQs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-4EH4hLDfyA&t=0h9m58s","sessions":["CG&A: Analytics and Applications"],"time_stamp":"2024-10-16T16:12:00Z","title":"News Globe: Visualization of Geolocalized News Articles","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-9745375","abstract":"We consider the general problem known as job shop scheduling, in which multiple jobs consist of sequential operations that need to be executed or served by appropriate machines having limited capacities. For example, train journeys (jobs) consist of moves and stops (operations) to be served by rail tracks and stations (machines). A schedule is an assignment of the job operations to machines and times where and when they will be executed. The developers of computational methods for job scheduling need tools enabling them to explore how their methods work. At a high level of generality, we define the system of pertinent exploration tasks and a combination of visualizations capable of supporting the tasks. We provide general descriptions of the purposes, contents, visual encoding, properties, and interactive facilities of the visualizations and illustrate them with images from an example implementation in air traffic management. We justify the design of the visualizations based on the tasks, principles of creating visualizations for pattern discovery, and scalability requirements. 
The outcomes of our research are sufficiently general to be of use in a variety of applications.","accessible_pdf":false,"authors":[{"affiliations":"","email":"gennady.andrienko@iais.fraunhofer.de","is_corresponding":true,"name":"Gennady Andrienko"},{"affiliations":"","email":"natalia.andrienko@iais.fraunhofer.de","is_corresponding":false,"name":"Natalia Andrienko"},{"affiliations":"","email":"jmcordero@e-crida.enaire.es","is_corresponding":false,"name":"Jose Manuel Cordero Garcia"},{"affiliations":"","email":"dirk.hecker@iais.fraunhofer.de","is_corresponding":false,"name":"Dirk Hecker"},{"affiliations":"","email":"georgev@unipi.gr","is_corresponding":false,"name":"George A. Vouros"}],"award":"","doi":"10.1109/MCG.2022.3163437","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"9745375","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-9745375","image_caption":"Example of a schedule view showing three versions of a schedule","keywords":["Visualization, Schedules, Task Analysis, Optimization, Job Shop Scheduling, Data Analysis, Processor Scheduling, Iterative Methods"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://openaccess.city.ac.uk/id/eprint/28062/","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/-4EH4hLDfyA&t=0h1m18s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-9745375/v-cga-9745375_Preview.mp4?token=Fdm_ecxXarixBi1Z9WgSBNT5yfnA0tluZk-dbQApEz4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-9745375/v-cga-9745375_Preview.srt?token=2W4yg6vklLtn31kpj4gAMAAeNRrh_cCPFAD5iM9OjKo&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga1","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Analytics and Applications","session_uid":"v-cga","session_youtube_ff_id":"wj0IQ4MZIGs","session_youtube_ff_link":"https://youtu.be/wj0IQ4MZIGs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-4EH4hLDfyA&t=0h1m18s","sessions":["CG&A: Analytics and Applications"],"time_stamp":"2024-10-16T16:00:00Z","title":"Supporting Visual Exploration of Iterative Job Scheduling","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-9866547","abstract":"In many applications, developed deep-learning models need to be iteratively debugged and refined to improve the model efficiency over time. Debugging some models, such as temporal multilabel classification (TMLC) where each data point can simultaneously belong to multiple classes, can be especially more challenging due to the complexity of the analysis and instances that need to be reviewed. In this article, focusing on video activity recognition as an application of TMLC, we propose DETOXER, an interactive visual debugging system to support finding different error types and scopes through providing multiscope explanations.","accessible_pdf":false,"authors":[{"affiliations":"","email":"m.nourani@northeastern.edu","is_corresponding":true,"name":"Mahsan Nourani"},{"affiliations":"","email":"chiradeep.roy@utdallas.edu","is_corresponding":false,"name":"Chiradeep Roy"},{"affiliations":"","email":"dhoneycutt@ufl.edu","is_corresponding":false,"name":"Donald R. 
Honeycutt"},{"affiliations":"","email":"eragan@ufl.edu","is_corresponding":false,"name":"Eric D. Ragan"},{"affiliations":"","email":"vibhav.gogate@utdallas.edu","is_corresponding":false,"name":"Vibhav Gogate"}],"award":"","doi":"10.1109/MCG.2022.3201465","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"9866547","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-9866547","image_caption":"Overview of DETOXER, a visual (de)bugging (to)ol with Multi-Scope E(x)planations for (er)ror detection in Temporal Multi-Label Classification. In the center, a video is selected for exploration. Directly under the progress bar, heatmaps demonstrate the model\u2019s confidence for any given label per second (frame-level explanations)-(C). On the left, available videos are shown; for each video, the tool shows top-5 detected labels (A) and the rate of FP and FN errors (B) in the video (video-level explanations). The selected video is emphasized with a blue background. On the right, a global information panel displays model performance metrics (D) and object-specific FN and FP error rates in two vertically adjacent bar charts (E) (Global-level explanations).","keywords":["Debugging, Analytical Models, Heating Systems, Data Models, Computational Modeling, Activity Recognition, Deep Learning, Multi Label Classification, Visualization Tool, Temporal Classification, Visual Debugging, False Positive, False Negative, Active Components, Deep Learning Models, Types Of Errors, Video Frames, Error Detection, Detection Of Types, Action Recognition, Interactive Visualization, Sequence Of Points, Design Goals, Positive Errors, Critical Outcomes, Error Patterns, Global Panel, False Negative Rate, False Positive Rate, Heatmap, Visual Approach, Truth Labels, True Positive, Confidence Score, Anomaly Detection, Interface Elements"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/-4EH4hLDfyA&t=0h23m32s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-9866547/v-cga-9866547_Preview.mp4?token=B9Wp229HHlCcfq9D9Qr7a55Xu9ESLWtT3VHomkFEYTg&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-9866547/v-cga-9866547_Preview.srt?token=tPdcwmd1QEjEtJ9-F3m_oFN3MZGg13Pzbvi5ghWN3Ns&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga1","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Analytics and Applications","session_uid":"v-cga","session_youtube_ff_id":"6eBUBzR5Zlc","session_youtube_ff_link":"https://youtu.be/6eBUBzR5Zlc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-4EH4hLDfyA&t=0h23m32s","sessions":["CG&A: Analytics and Applications"],"time_stamp":"2024-10-16T16:24:00Z","title":"DETOXER: A Visual Debugging Tool With Multiscope Explanations for Temporal Multilabel Classification","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-10078374","abstract":"Existing dynamic weighted graph visualization approaches rely on users\u2019 mental comparison to perceive temporal evolution of dynamic weighted graphs, hindering users from effectively analyzing changes across multiple timeslices. 
We propose DiffSeer, a novel approach for dynamic weighted graph visualization by explicitly visualizing the differences of graph structures (e.g., edge weight differences) between adjacent timeslices. Specifically, we present a novel nested matrix design that overviews the graph structure differences over a time period as well as shows graph structure details in the timeslices of user interest. By collectively considering the overall temporal evolution and structure details in each timeslice, an optimization-based node reordering strategy is developed to group nodes with similar evolution patterns and highlight interesting graph structure details in each timeslice. We conducted two case studies on real-world graph datasets and in-depth interviews with 12 target users to evaluate DiffSeer. The results demonstrate its effectiveness in visualizing dynamic weighted graphs.","accessible_pdf":false,"authors":[{"affiliations":"","email":"wenxiaolin@stu.scu.edu.cn","is_corresponding":true,"name":"Xiaolin Wen"},{"affiliations":"","email":"yongwang@smu.edu.sg","is_corresponding":false,"name":"Yong Wang"},{"affiliations":"","email":"wumeixuan@stu.scu.edu.cn","is_corresponding":false,"name":"Meixuan Wu"},{"affiliations":"","email":"wangfengjie@stu.scu.edu.cn","is_corresponding":false,"name":"Fengjie Wang"},{"affiliations":"","email":"xuanwu.yue@connect.ust.hk","is_corresponding":false,"name":"Xuanwu Yue"},{"affiliations":"","email":"shenqm@sustech.edu.cn","is_corresponding":false,"name":"Qiaomu Shen"},{"affiliations":"","email":"mayx@sustech.edu.cn","is_corresponding":false,"name":"Yuxin Ma"},{"affiliations":"","email":"zhumin@scu.edu.cn","is_corresponding":false,"name":"Min Zhu"}],"award":"","doi":"10.1109/MCG.2023.3248289","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"10078374","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-10078374","image_caption":"Overview of DiffSeer: We focus on explicitly visualizing the differences between adjacent timeslices to support the analysis of the dynamic weighted graph evolution over a long time. Specifically, we proposed a nested matrix design, including (A) an overview matrix to provide a visual summary of differences and two types (B, C) of detail matrices to enable interactive inspection of graph details on demand. 
An optimization- based node reordering strategy is incorporated in the nested matrix design to group together nodes with similar evolution patterns and highlight interesting graph structure details in each timeslice.","keywords":["Visibility Graph, Spatial Patterns, Weight Change, In-depth Interviews, Temporal Changes, Temporal Evolution, Negative Changes, Interesting Patterns, Edge Weights, Real-world Datasets, Graph Structure, Visual Approach, Dynamic Visualization, Dynamic Graph, Financial Networks, Graph Datasets, Similar Evolutionary Patterns, User Interviews, Similar Changes, Chinese New Year, Sector Indices, Original Graph, Red Rectangle, Nodes In Order, Stock Market Crash, Stacked Bar Charts, Different Types Of Matrices, Chinese New, Blue Rectangle"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2302.07609","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Fy9c_xgh_I8&t=0h0m4s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10078374/v-cga-10078374_Preview.mp4?token=GPwpi68jEh0kOeIhl55NTOpGkzJSkquipYwccv1aRxY&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Systems, Theory, and Evaluations","session_uid":"v-cga","session_youtube_ff_id":"YpfkEg3bHfE","session_youtube_ff_link":"https://youtu.be/YpfkEg3bHfE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Fy9c_xgh_I8&t=0h0m4s","sessions":["CG&A: Systems, Theory, and Evaluations"],"time_stamp":"2024-10-17T16:00:00Z","title":"DiffSeer: Difference-Based Dynamic Weighted Graph Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-10128890","abstract":"Some 15 years ago, Visualization Viewpoints published an influential article titled Rainbow Color Map (Still) Considered Harmful (Borland and Taylor, 2007). The paper argued that the \u201crainbow colormap\u2019s characteristics of confusing the viewer, obscuring the data and actively misleading interpretation make it a poor choice for visualization.\u201d Subsequent articles often repeat and extend these arguments, so much so that avoiding rainbow colormaps, along with their derivatives, has become dogma in the visualization community. Despite this loud and persistent recommendation, scientists continue to use rainbow colormaps. Have we failed to communicate our message, or do rainbow colormaps offer advantages that have not been fully appreciated? We argue that rainbow colormaps have properties that are underappreciated by existing design conventions. We explore key critiques of the rainbow in the context of recent research to understand where and how rainbows might be misunderstood. 
Choosing a colormap is a complex task, and rainbow colormaps can be useful for selected applications.","accessible_pdf":false,"authors":[{"affiliations":"","email":"cware@ccom.unh.edu","is_corresponding":false,"name":"Colin Ware"},{"affiliations":"","email":"mstone@acm.org","is_corresponding":true,"name":"Maureen Stone"},{"affiliations":"","email":"danielle.szafir@cs.unc.edu","is_corresponding":false,"name":"Danielle Albers Szafir"}],"award":"","doi":"10.1109/MCG.2023.3246111","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"10128890","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-10128890","image_caption":"Rainbow colormaps have long been criticized, especially when shape-from-shading is required (upper left). But domain experts continue to use them, especially for highlighting specific values and global patterns (lower left). Classic rainbows have uneven hue distribution and erratic luminance profiles. But it is possible to craft rainbow colormaps that avoid these problems. (upper right).Placing hues on key values can create a useful \u201ccolor ruler.\u201d (lower right) We understand well enough why rainbows can be bad; let us instead work to find out when and why they are good. ","keywords":["Image Color Analysis, Semantics, Data Visualization, Estimation, Reliability Engineering"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Fy9c_xgh_I8&t=0h11m29s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10128890/v-cga-10128890_Preview.mp4?token=MdjcNS3-czC61d_9XyQpByoEmEXu9H8Re4F1GSKFbiw&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Systems, Theory, and Evaluations","session_uid":"v-cga","session_youtube_ff_id":"LJhB4o315nU","session_youtube_ff_link":"https://youtu.be/LJhB4o315nU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Fy9c_xgh_I8&t=0h11m29s","sessions":["CG&A: Systems, Theory, and Evaluations"],"time_stamp":"2024-10-17T16:12:00Z","title":"Rainbow Colormaps Are Not All Bad","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-10201383","abstract":"Although visualizations are a useful tool for helping people to understand information, they can also have unintended effects on human cognition. This is especially true for uncertain information, which is difficult for people to understand. Prior work has found that different methods of visualizing uncertain information can produce different patterns of decision making from users. However, uncertainty can also be represented via text or numerical information, and few studies have systematically compared these types of representations to visualizations of uncertainty. We present two experiments that compared visual representations of risk (icon arrays) to numerical representations (natural frequencies) in a wildfire evacuation task. Like prior studies, we found that different types of visual cues led to different patterns of decision making. 
In addition, our comparison of visual and numerical representations of risk found that people were more likely to evacuate when they saw visualizations than when they saw numerical representations. These experiments reinforce the idea that design choices are not neutral: seemingly minor differences in how information is represented can have important impacts on human risk perception and decision making.","accessible_pdf":true,"authors":[{"affiliations":"","email":"lematze@sandia.gov","is_corresponding":true,"name":"Laura E. Matzen"},{"affiliations":"","email":"bchowel@sandia.gov","is_corresponding":false,"name":"Breannan C. Howell"},{"affiliations":"","email":"mctrumb@sandia.gov","is_corresponding":false,"name":"Michael C. S. Trumbo"},{"affiliations":"","email":"kmdivis@sandia.gov","is_corresponding":false,"name":"Kristin M. Divis"}],"award":"","doi":"10.1109/MCG.2023.3299875","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"10201383","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-10201383","image_caption":"This figure shows stimuli from an experiment comparing two representations of probability: natural frequencies and icon arrays. Although these representations convey the same information, the visual cues provided by the icon arrays can change people's perception of the risk.","keywords":["Visualization, Uncertainty, Decision Making, Costs, Task Analysis, Laboratories, Information Analysis, Decision Making, Visual Representation, Numerical Representation, Decision Patterns, Deterministic, Risk Perception, Specific Information, Fundamental Frequency, Point Values, Representation Of Information, Risk Information, Visual Conditions, Numerous Conditions, Human Decision, Numerical Information, Impact Of Different Types, Uncertain Information, Type Of Visualization, Differences In Risk Perception, Representation Of Uncertainty, Increase In Participation, Participants In Experiment, Individual Difference Measures, Sandia National Laboratories, Risk Propensity, Bonus Payments, Average Response Time, Difference In Probability, Response Time"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Fy9c_xgh_I8&t=0h37m8s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10201383/v-cga-10201383_Preview.mp4?token=vTrP56YmWW_mcWhejPKjE3Jm7h8whXcyMJQ7mGIfOYg&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Systems, Theory, and Evaluations","session_uid":"v-cga","session_youtube_ff_id":"WMGfURPRFEg","session_youtube_ff_link":"https://youtu.be/WMGfURPRFEg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Fy9c_xgh_I8&t=0h37m8s","sessions":["CG&A: Systems, Theory, and Evaluations"],"time_stamp":"2024-10-17T16:36:00Z","title":"Numerical and Visual Representations of Uncertainty Lead to Different Patterns of Decision Making","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-10207831","abstract":"The membership function is to categorize quantities along with a confidence degree. 
This article investigates a generic user interaction based on this function for categorizing various types of quantities without modification, which empowers users to articulate uncertainty categorization and enhance their visual data analysis significantly. We present the technique design and an online prototype, supplementing with insights from three case studies that highlight the technique\u2019s efficacy among different types of quantities. Furthermore, we conduct a formal user study to scrutinize the process and reasoning users employ while utilizing our technique. The findings indicate that our technique can help users create customized categories. Both our code and the interactive prototype are made available as open-source resources, intended for application across varied domains as a generic tool.","accessible_pdf":true,"authors":[{"affiliations":"","email":"liuliqun.cs@gmail.com","is_corresponding":true,"name":"Liqun Liu"},{"affiliations":"","email":"romain.vuillemot@ec-lyon.fr","is_corresponding":false,"name":"Romain Vuillemot"}],"award":"","doi":"10.1109/MCG.2023.3301449","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"10207831","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-10207831","image_caption":"The illustration of an interactive membership function. Users can change the shape of the membership function by dragging the black points in (a) to adjust the range of the categories (Children, Youth, Adult, and Old). This interactive membership function helps users map the quantities (column Age) into categories (column Categories). The table in (b) shows the membership degrees derived from the membership function.","keywords":["Data Visualization, Uncertainty, Prototypes, Fuzzy Logic, Image Color Analysis, Fuzzy Sets, Open Source Software, General Function, Membership Function, User Study, Classification Process, Fuzzy Logic, Quantitative Values, Visualization Techniques, Amount Of Type, Fuzzy Theory, General Interaction, Temperature Dataset, Interaction Techniques, Carbon Dioxide, Computation Time, Rule Based, Web Page, Real World Scenarios, Fuzzy Set, Domain Experts, Supercritical CO 2, Parallel Coordinates, Fuzzy System, Fuzzy Clustering, Interactive Visualization, Amount Of Items, Large Scale Problems"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://hal.science/hal-04241000/document","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Fy9c_xgh_I8&t=0h24m18s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10207831/v-cga-10207831_Preview.mp4?token=bxQnsCspXz7hbX4rJka9sDjNpl6wHU5a4_ah1whqqos&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10207831/v-cga-10207831_Preview.srt?token=q77LkQdAuZz_5yVfqb8pfC-ovH7zm2jYr4K4qLxQw-o&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Systems, Theory, and Evaluations","session_uid":"v-cga","session_youtube_ff_id":"zfGGIlFz_-s","session_youtube_ff_link":"https://youtu.be/zfGGIlFz_-s","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Fy9c_xgh_I8&t=0h24m18s","sessions":["CG&A: Systems, Theory, and 
Evaluations"],"time_stamp":"2024-10-17T16:24:00Z","title":"A Generic Interactive Membership Function for Categorization of Quantities","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-10414267","abstract":"Traditional approaches to data visualization have often focused on comparing different subsets of data, and this is reflected in the many techniques developed and evaluated over the years for visual comparison. Similarly, common workflows for exploratory visualization are built upon the idea of users interactively applying various filter and grouping mechanisms in search of new insights. This paradigm has proven effective at helping users identify correlations between variables that can inform thinking and decision-making. However, recent studies show that consumers of visualizations often draw causal conclusions even when not supported by the data. Motivated by these observations, this article highlights recent advances from a growing community of researchers exploring methods that aim to directly support visual causal inference. However, many of these approaches have their own limitations, which limit their use in many real-world scenarios. This article, therefore, also outlines a set of key open challenges and corresponding priorities for new research to advance the state of the art in visual causal inference.","accessible_pdf":false,"authors":[{"affiliations":"","email":"borland@renci.org","is_corresponding":false,"name":"David Borland"},{"affiliations":"","email":"zeyuwang@cs.unc.edu","is_corresponding":false,"name":"Arran Zeyu Wang"},{"affiliations":"","email":"gotz@unc.edu","is_corresponding":false,"name":"David Gotz"}],"award":"","doi":"10.1109/MCG.2023.3338788","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"10414267","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-10414267","image_caption":"A counterfactual subset includes data points from the excluded set that closely resemble those in the included set. Previous research indicates that visualizations comparing the counterfactual subset with the included subset (c) lead to more accurate causal inferences than traditional methods (b). 
This work will share our vision for how counterfactual concepts developed by the causal inference community can be leveraged to enable the development of more effective visualization technologies.","keywords":["Analytical Models, Correlation, Visual Analytics, Decision Making, Data Visualization, Reliability Theory, Cognition, Inference Algorithms, Causal Inference, Causality, Social Media, Exploratory Analysis, Data Visualization, Visual Representation, Visual Analysis, Visualization Tool, Open Challenges, Interactive Visualization, Assembly Line, Different Subsets Of Data, Visual Analytics Tool, Data Driven Decision Making, Data Quality, Statistical Models, Causal Effect, Visual System, Use Of Social Media, Bar Charts, Causal Model, Causal Graph, Chart Types, Directed Acyclic Graph, Visual Design, Portion Of The Dataset, Causal Structure, Prior Section, Causal Explanations, Line Graph"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2401.08411","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Fy9c_xgh_I8&t=0h50m24s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10414267/v-cga-10414267_Preview.mp4?token=FVxCuB6xLp1DUV6Sm1Gh4cxqFbnyw2q5gHteaBAiyUw&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10414267/v-cga-10414267_Preview.srt?token=CY0_DiPO-Mt3EyALCu_X6qEaEG2xb9-IPY8DehrAEN0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Systems, Theory, and Evaluations","session_uid":"v-cga","session_youtube_ff_id":"N6USrLE8yfo","session_youtube_ff_link":"https://youtu.be/N6USrLE8yfo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Fy9c_xgh_I8&t=0h50m24s","sessions":["CG&A: Systems, Theory, and Evaluations"],"time_stamp":"2024-10-17T16:48:00Z","title":"Using Counterfactuals to Improve Causal Inferences From Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-cga-10478355","abstract":"Recent developments in artificial intelligence (AI) and machine learning (ML) have led to the creation of powerful generative AI methods and tools capable of producing text, code, images, and other media in response to user prompts. Significant interest in the technology has led to speculation about what fields, including visualization, can be augmented or replaced by such approaches. However, there remains a lack of understanding about which visualization activities may be particularly suitable for the application of generative AI. Drawing on examples from the field, we map current and emerging capabilities of generative AI across the different phases of the visualization lifecycle and describe salient opportunities and challenges.","accessible_pdf":true,"authors":[{"affiliations":"","email":"rahul.basole@accenture.com","is_corresponding":false,"name":"Rahul C. 
Basole"},{"affiliations":"","email":"timothy.major@accenture.com","is_corresponding":true,"name":"Timothy Major"}],"award":"","doi":"10.1109/MCG.2024.3362168","event_id":"v-cga","event_title":"CG&A Invited Partnership Presentations","external_paper_link":"","fno":"10478355","has_fno":true,"has_image":true,"has_pdf":"","id":"v-cga-10478355","image_caption":"The iterative phases of the end-to-end visualization workflow (A-G) and types of generative AI opportunities (Creativity, Co-Pilot, and Automation) within them.","keywords":["Generative AI, Art, Artificial Intelligence, Machine Learning, Visualization, Media, Augmented Reality, Machine Learning, Visual Representation, Professional Knowledge, Creative Process, Domain Experts, Generalization Capability, Development Of Artificial Intelligence, Artificial Intelligence Capabilities, Iterative Process, Natural Language, Commercial Software, Hallucinations, Team Sports, Design Requirements, Intelligence Agencies, Recommender Systems, User Requirements, Iterative Design, Use Of Artificial Intelligence, Visual Design, Phase Assemblage, Data Literacy"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Fy9c_xgh_I8&t=1h0m30s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10478355/v-cga-10478355_Preview.mp4?token=bntoA5E5ZfCGu-KJFGG0JyRkowZQF3EIwF7Kt-WhyqU&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-cga/v-cga-10478355/v-cga-10478355_Preview.srt?token=Z5yTk4Adf9lkH6U9OqZvHtSDFifTio2hnkzEt1-bZuE&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"cga2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"CG&A: Systems, Theory, and Evaluations","session_uid":"v-cga","session_youtube_ff_id":"UDI3JoGu2Qs","session_youtube_ff_link":"https://youtu.be/UDI3JoGu2Qs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Fy9c_xgh_I8&t=1h0m30s","sessions":["CG&A: Systems, Theory, and Evaluations"],"time_stamp":"2024-10-17T17:00:00Z","title":"Generative AI for Visualization: Opportunities and Challenges","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-biomedchallenge-2860","abstract":"Visualization of spatial datasets is essential for understanding biological systems that are composed of several interacting cell types. For example, gene expression data at the molecular level needs to be interpreted based on cell type, spatial context, tissue type, and interactions with the surrounding environment. Recent advances in spatial profiling technologies allow measurements of the level of thousands of proteins or genes at different spatial locations along with corresponding cellular composition. Representing such high dimensional data effectively to facilitate data interpretation is a major challenge. Existing methods such as spatially plotted pie or dot charts obscure underlying tissue regions and necessitate switching between different views for accurate interpretations. Here, we present TissuePlot, a novel method for visualizing spatial data at molecular, cellular and tissue levels in the context of their spatial locations. 
To this end, TissuePlot employs a transparent hexagon tessellation approach that utilizes object borders to represent cell composition or gene-level data without obscuring the underlying tissue image. Additionally, it offers a multi-view interactive web app, that allows interrogating spatial tissue data at multiple scales linking molecular information to tissue anatomy and motifs. We demonstrate TissuePlot utility using mouse brain data from the Bio+MedVis Redesign Challenge 2024. Our tool is accessible at https://sailem-group.github.io/TissuePlot.","accessible_pdf":false,"authors":[{"affiliations":["King's College London, London, United Kingdom"],"email":"heba.sailem@kcl.ac.uk","is_corresponding":true,"name":"Heba Zuhair Sailem"}],"award":"","doi":"","event_id":"a-biomedchallenge","event_title":"Bio+MedVis Challenges","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-biomedchallenge-2860","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/6sxFyy5SXQ0&t=1h9m14s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Bio+Med+Vis Workshop","session_uid":"a-biomedchallenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/6sxFyy5SXQ0&t=1h9m14s","sessions":["Bio+Med+Vis Workshop"],"time_stamp":"2024-10-13T16:00:00Z","title":"TissuePlot: A Multi-Scale Interactive Web App For Visualizing Spatial Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-biomedchallenge-3099","abstract":"For the Bio+Med-Vis Challenge 2024, we propose a visual analytics system as a redesign for the scatter pie chart visualization of cell type proportions of spatial transcriptomics data. Our design uses three linked views: a view of the histological image of the tissue, a stacked bar chart showing cell type proportions of the spots, and a scatter plot showing a dimensionality reduction of the multivariate proportions. Furthermore, we apply a compositional data analysis framework, the Aitchison geometry, to the proportions for dimensionality reduction and k-means clustering. Leveraging brushing and linking, the system allows one to explore and uncover patterns in the cell type mixtures and relate them to their spatial locations on the cellular tissue. This redesign shifts the pattern recognition workload from the human visual system to computational methods commonly used in visual analytics. 
We provide the code and setup instructions of our visual analytics system on GitHub.(https://github.com/UniStuttgart-VISUS/va-for-spatial-transcriptomics)","accessible_pdf":false,"authors":[{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"david.haegele@visus.uni-stuttgart.de","is_corresponding":true,"name":"David H\u00e4gele"},{"affiliations":["University of Stuttgart , Stuttgart , Germany"],"email":"st189806@stud.uni-stuttgart.de","is_corresponding":false,"name":"Yuxuan Tang"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"weiskopf@visus.uni-stuttgart.de","is_corresponding":false,"name":"Daniel Weiskopf"}],"award":"","doi":"","event_id":"a-biomedchallenge","event_title":"Bio+MedVis Challenges","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-biomedchallenge-3099","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/6sxFyy5SXQ0&t=1h22m23s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Bio+Med+Vis Workshop","session_uid":"a-biomedchallenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/6sxFyy5SXQ0&t=1h22m23s","sessions":["Bio+Med+Vis Workshop"],"time_stamp":"2024-10-13T16:00:00Z","title":"Visual Compositional Data Analytics for Spatial Transcriptomics","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-biomedchallenge-4384","abstract":"We introduce a novel method for overlaying cell type proportion data onto tissue images. This approach preserves spatial context while avoiding visual clutter or excessively obscuring the underlying slide. 
Our proposed technique involves clustering the data and aggregating neighboring points of the same cluster into polygons.","accessible_pdf":false,"authors":[{"affiliations":["NIH, Rockville, United States","Queen's University, Belfast, United Kingdom"],"email":"masonlk@nih.gov","is_corresponding":true,"name":"Lee Mason"},{"affiliations":["National Institutes of Health, Rockville, United States"],"email":"jonas.dealmeida@nih.gov","is_corresponding":false,"name":"Jonas S Almeida"}],"award":"","doi":"","event_id":"a-biomedchallenge","event_title":"Bio+MedVis Challenges","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-biomedchallenge-4384","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/6sxFyy5SXQ0&t=0h56m24s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Bio+Med+Vis Workshop","session_uid":"a-biomedchallenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/6sxFyy5SXQ0&t=0h56m24s","sessions":["Bio+Med+Vis Workshop"],"time_stamp":"2024-10-13T16:00:00Z","title":"A Simplified Positional Cell Type Visualization using Spatially Aggregated Clusters","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-biomedchallenge-4393","abstract":"The 3D Cycled Immunofluorescence (CyCIF) technique produces high-resolution multiplexed images, often representing a large number of biomarkers. With current visualization tools, it is hard to identify the important subset of markers and locate notable regions within the tissue. To address this challenge, we propose an LLM-supported agent to navigate 3D CyCIF Imaging that interprets a novice user's natural language queries, identifies relevant markers, and locates significant regions within the tissue. 
Our results demonstrate the agent's ability to dynamically update views, answering various queries, from general questions to specific region-based requests.","accessible_pdf":false,"authors":[{"affiliations":["The University of Texas at Arlington, Arlington, United States"],"email":"acd9300@mavs.uta.edu","is_corresponding":true,"name":"Aarti Darji"},{"affiliations":["DBMI, Boston, United States"],"email":"ericmoerth@g.harvard.edu","is_corresponding":false,"name":"Eric Moerth"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"morgan_turner@hms.harvard.edu","is_corresponding":false,"name":"Morgan L Turner"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"david_kouril@hms.harvard.edu","is_corresponding":false,"name":"David Kou\u0159il"},{"affiliations":["The University of Texas at Arlington, Arlington, United States"],"email":"jacob.luber@uta.edu","is_corresponding":false,"name":"Jacob Luber"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"nils@hms.harvard.edu","is_corresponding":false,"name":"Nils Gehlenborg"}],"award":"","doi":"","event_id":"a-biomedchallenge","event_title":"Bio+MedVis Challenges","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-biomedchallenge-4393","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/6sxFyy5SXQ0&t=1h36m18s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Bio+Med+Vis Workshop","session_uid":"a-biomedchallenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/6sxFyy5SXQ0&t=1h36m18s","sessions":["Bio+Med+Vis Workshop"],"time_stamp":"2024-10-13T16:00:00Z","title":"LLM - Supported Exploration of 3D Microscopy Imaging","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-biomedchallenge-8493","abstract":"The objective of the Redesign Challenge of the Bio+MedVis Challenge @ IEEE VIS 2024 is to redesign an existing visualization of multi-cell gene expressions of tissue samples. In this, multiple cells are accumulated into pixels. For each pixel the visualization should convey the prevalence and extent of cell types it is composed of, i.e., a proportional relation. The provided baseline technique of superimposed Pie charts -- a common technique for this kind of relation -- is not an ideal choice as the cell-type quantities of neighboring pixels are hard to compare due to a spatial disarray inherent to pie charts. This limits the perception of regions with coherent cell-type compositions, which constitutes one of the essential visual analytics tasks. We propose a novel marker design: \\emph{Droplets} -- a space-saving design for visually enhancing the presence of clusters and regional borders. 
We evaluate this concept for the given tissue sample and compare it to the given baseline and other alternatives.","accessible_pdf":false,"authors":[{"affiliations":["Graz University of Technology, Graz, Austria"],"email":"s.lengauer@cgv.tugraz.at","is_corresponding":true,"name":"Stefan Lengauer"},{"affiliations":["Graz University of Technology, Graz, Austria"],"email":"peter.waldert@cgv.tugraz.at","is_corresponding":false,"name":"Peter Waldert"},{"affiliations":["Graz University of Technology, Graz, Austria"],"email":"tobias.schreck@cgv.tugraz.at","is_corresponding":false,"name":"Tobias Schreck"}],"award":"","doi":"","event_id":"a-biomedchallenge","event_title":"Bio+MedVis Challenges","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-biomedchallenge-8493","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/6sxFyy5SXQ0&t=1h32m33s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Bio+Med+Vis Workshop","session_uid":"a-biomedchallenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/6sxFyy5SXQ0&t=1h32m33s","sessions":["Bio+Med+Vis Workshop"],"time_stamp":"2024-10-13T16:00:00Z","title":"Droplets: A Marker Design for visually enhancing Local Cluster Association","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-biomedchallenge-9833","abstract":"Spatial transcriptomics methods capture cellular measurements such as gene expression and cell types at specific locations in a cell, helping provide a localized picture of tissue health. Traditional visualization techniques superimpose the tissue image with pie charts for the cell distribution. We design an interactive visual analysis system that addresses perceptual problems in the state of the art, while adding filtering, drilling, and clustering analysis capabilities. Our approach can help researchers gain deeper insights into the molecular mechanisms underlying complex biological processes within tissues.","accessible_pdf":false,"authors":[{"affiliations":["University of Illinois Chicago, Chicago, United States"],"email":"szhao69@uic.edu","is_corresponding":true,"name":"Siyuan Zhao"},{"affiliations":["University of Illinois at Chicago, Chicago, United States"],"email":"g.elisabeta.marai@gmail.com","is_corresponding":false,"name":"G. 
Elisabeta Marai"}],"award":"","doi":"","event_id":"a-biomedchallenge","event_title":"Bio+MedVis Challenges","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-biomedchallenge-9833","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/6sxFyy5SXQ0&t=1h42m41s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Bio+Med+Vis Workshop","session_uid":"a-biomedchallenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/6sxFyy5SXQ0&t=1h42m41s","sessions":["Bio+Med+Vis Workshop"],"time_stamp":"2024-10-13T16:00:00Z","title":"A Part-to-Whole Circular Cell Explorer","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-vast-challenge-1002","abstract":"We present an interactive visual analysis tool to explore large dynamic graphs. Our system provides users with multiple perspectives to analyze the network. The graph view presents the node-link structure and offers various layout options. To complement, a temporal view shows both the overall temporal distribution and detailed event timelines. The system also supports flexible filtering to reduce the graph size and identify interesting entities. One bonus feature of our system is the provenance map, which visualizes the automatically captured user interactions and allows users to record their findings. 
The provenance map is helpful for organizing the exploration process and synthesizing analysis results.","accessible_pdf":false,"authors":[{"affiliations":["Peking University, Beijing, China","Peking University, Beijing, China"],"email":"yuhan.guo@pku.edu.cn","is_corresponding":true,"name":"Yuhan Guo"},{"affiliations":["Peking University, Beijing, China","Peking University, Beijing, China"],"email":"luoyuchu@pku.edu.cn","is_corresponding":false,"name":"Yuchu Luo"},{"affiliations":["Peking University, Beijing, China","Peking University, Beijing, China"],"email":"cxyapril@stu.pku.edu.cn","is_corresponding":false,"name":"Xinyue Chen"},{"affiliations":["Peking University, Beijing, China","Peking University, Beijing, China"],"email":"hanning.shao@pku.edu.cn","is_corresponding":false,"name":"Hanning Shao"},{"affiliations":["Peking University, Beijing, China","Peking University, Beijing, China"],"email":"xiaoru.yuan@pku.edu.cn","is_corresponding":false,"name":"Xiaoru Yuan"},{"affiliations":["University of Nottingham, Nottingham, United Kingdom","University of Nottingham, Nottingham, United Kingdom"],"email":"kai.xu@nottingham.ac.uk","is_corresponding":false,"name":"Kai Xu"}],"award":"","doi":"","event_id":"a-vast-challenge","event_title":"VAST Challenge","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-vast-challenge-1002","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/HhIQktS5HZs&t=2h2m54s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest2","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"VAST Challenge","session_uid":"a-vast-challenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HhIQktS5HZs&t=2h2m54s","sessions":["VAST Challenge"],"time_stamp":"2024-10-13T12:30:00Z","title":"Visual Analysis of Complex Temporal Networks Supported by Analytic Provenance","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-vast-challenge-1006","abstract":"The exposure of illegal fishing by SouthSeafood Express Corp highlights the urgent need for better tools to monitor commercial fishing in Oceanus. In response, we develop an interactive visualization tool for the VAST Challenge\u2019s Mini-Challenge 2. Our system analyzes the CatchNet knowledge graph, combining vessel tracking and port records from FishEye International, a non-profit dedicated to combating illegal fishing. The tool links vessels to probable cargos, identifies seasonal trends, and detects anomalies in port records. 
It also detects suspicious vessel activity, offering actionable insights to aid investigations and prevent future illegal fishing.","accessible_pdf":false,"authors":[{"affiliations":["Getulio Vargas Foundation, Rio de Janeiro, Brazil"],"email":"jherediaparillo@gmail.com","is_corresponding":false,"name":"Juanpablo Andrew Heredia"},{"affiliations":["Get\u00falio Vargas Foundation, Rio de Janeiro, Brazil"],"email":"fabricio.venturim@fgv.edu.br","is_corresponding":false,"name":"Fabr\u00edcio Venturim"},{"affiliations":["Funda\u00e7\u00e3o Getulio Vargas, Rio de Janeiro, Brazil"],"email":"dany.diaz@ucsp.edu.pe","is_corresponding":false,"name":"Dany Mauro Diaz Espino"},{"affiliations":["FGV, Rio de Janeiro, Brazil"],"email":"felipe.moreno.vera@gmail.com","is_corresponding":false,"name":"Felipe Moreno-Vera"},{"affiliations":["Funda\u00e7\u00e3o Getulio Vargas, Rio de Janeiro, Brazil"],"email":"jpocom@gmail.com","is_corresponding":false,"name":"Jorge Poco"}],"award":"","doi":"","event_id":"a-vast-challenge","event_title":"VAST Challenge","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-vast-challenge-1006","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest2","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"VAST Challenge","session_uid":"a-vast-challenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VAST Challenge"],"time_stamp":"2024-10-13T12:30:00Z","title":"Prerecorded video (VAST Challenge submission ID 1004)","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-vast-challenge-1013","abstract":"This paper addresses the visualization challenges posed by Mini Challenge 3 of the VAST Challenge 2024, which involves detecting illegal fishing activities within a dynamic network of companies and individuals. The task requires effective anomaly detection in a time-dependent knowledge graph, a scenario where conventional graph visualization tools often fall short due to their limited ability to integrate temporal data and the undefined nature of the anomalies. We demonstrate how to overcome these challenges through well-crafted views implemented in standard software libraries. Our approach involves decomposing the time-dependent knowledge graph into separate time and structure components, as well as providing data-driven guidance for identifying anomalies. These components are then interconnected through extensive interactivity, enabling exploration of anomalies in a complex, temporally evolving network. 
The source code and a demonstration video are publicly available at github.com/MaAllma/Temporal/Knowledge/Graph/Analysis.","accessible_pdf":false,"authors":[{"affiliations":["RPTU in Kaiserslautern, Kaiserslautern, Germany"],"email":"allmann@rhrk.uni-kl.de","is_corresponding":false,"name":"Magdalena Allmann"},{"affiliations":["RPTU in Kaiserslautern, Kaiserslautern, Germany"],"email":"iselborn@rptu.de","is_corresponding":true,"name":"Kevin Iselborn"},{"affiliations":["University of Kaiserslautern-Landau, Kaiserslautern, Germany"],"email":"j_sohns12@cs.uni-kl.de","is_corresponding":false,"name":"Jan-Tobias Sohns"},{"affiliations":["University of Kaiserslautern-Landau, Kaiserslautern, Germany"],"email":"leitte@cs.uni-kl.de","is_corresponding":false,"name":"Heike Leitte"}],"award":"","doi":"","event_id":"a-vast-challenge","event_title":"VAST Challenge","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-vast-challenge-1013","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/HhIQktS5HZs&t=2h18m42s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest2","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"VAST Challenge","session_uid":"a-vast-challenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HhIQktS5HZs&t=2h18m42s","sessions":["VAST Challenge"],"time_stamp":"2024-10-13T12:30:00Z","title":"Visual Anomaly Detection in Temporal Knowledge Graphs","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-vast-challenge-1016","abstract":"This paper presents the comprehensive analysis and visualizations developed by the FES-MC2-1 team for the VAST Challenge 2024, Mini-Challenge 2. The challenge required us to analyze port exit records, transponder ping data, and cargo delivery reports to associate vessels with their probable cargos, identify seasonal trends and anomalies, and detect illegal fishing activities by SouthSeafood Express Corp vessels. 
Utilizing a combination of advanced visual analytics tools\u2014including Tableau, Python, React, Docker, Postgresql, Nginx and custom-developed solutions from the University of Konstanz\u2014our team uncovered patterns in the data that reveal suspicious activities and significant shifts in fishing behavior following the crackdown on illegal operations.","accessible_pdf":false,"authors":[{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"sinem-bilge.gueler@uni-konstanz.de","is_corresponding":true,"name":"Sinem Bilge Guler"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"mehmet-emre.sahin@uni-konstanz.de","is_corresponding":false,"name":"Mehmet Emre Sahin"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"funda.yildiz-aydin@uni-konstanz.de","is_corresponding":false,"name":"Funda Yildiz-Aydin"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"keim@uni-konstanz.de","is_corresponding":false,"name":"Daniel Keim"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"u.schlegel@uni-konstanz.de","is_corresponding":false,"name":"Udo Schlegel"}],"award":"","doi":"","event_id":"a-vast-challenge","event_title":"VAST Challenge","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-vast-challenge-1016","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest2","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"VAST Challenge","session_uid":"a-vast-challenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VAST Challenge"],"time_stamp":"2024-10-13T12:30:00Z","title":"VAST 2024-MC2 Challenge","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-vast-challenge-1018","abstract":"In this work, we present a visual analytics approach designed to address the 2024 VAST Challenge Mini-Challenge 1, which focuses on detecting bias in a knowledge graph. Our solution utilizes pixel-based visualizations to explore patterns within the knowledge graph, CatchNet, which is employed to identify potential illegal fishing activities. CatchNet is constructed by FishEye analysts who aggregate open-source data, including news articles and public reports. They have recently begun incorporating knowledge extracted from these sources using advanced language models. Our method combines pixel-based visualizations with ordering techniques and sentiment analysis to uncover hidden patterns in both the news articles and the knowledge graph. 
Notably, our analysis reveals that news articles covering critiques and convictions of companies are subject to elevated levels of bias.","accessible_pdf":false,"authors":[{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"raphael.buchmueller@uni-konstanz.de","is_corresponding":false,"name":"Raphael Buchm\u00fcller"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"daniel.fuerst@uni-konstanz.de","is_corresponding":true,"name":"Daniel F\u00fcrst"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"alexander.frings@uni-konstanz.de","is_corresponding":false,"name":"Alexander Frings"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"u.schlegel@uni-konstanz.de","is_corresponding":false,"name":"Udo Schlegel"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"keim@uni-konstanz.de","is_corresponding":false,"name":"Daniel Keim"}],"award":"","doi":"","event_id":"a-vast-challenge","event_title":"VAST Challenge","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-vast-challenge-1018","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/HhIQktS5HZs&t=0h29m10s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest2","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"VAST Challenge","session_uid":"a-vast-challenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HhIQktS5HZs&t=0h29m10s","sessions":["VAST Challenge"],"time_stamp":"2024-10-13T12:30:00Z","title":"UKON-Buchmueller-MC1","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-vast-challenge-1019","abstract":"The SunSpot project is a comprehensive solution to address the 2024 IEEE VAST Challenge MC2, focusing on detecting abnormal vessel activities. Our method integrated data on fishing records, vessel trajectories, commodity-vessel relationships, and fish distributions. We created a set of visualizations to help analysts better understand the characteristics of the area, vessels, and fishing activities. We considered a vessel\u2019s departure from and return to a harbor as a basic cycle of activity and classified these cycles into patterns based on location and dwell time. By visualizing the spatial and temporal aspects of these cycles, we effectively distinguished illegal fishing from normal fishing activities. Our solution highlights the strengths of a multidirectional approach in data analytics, incorporating vessel information, fish origins, exported commodities, and shipping ports.","accessible_pdf":false,"authors":[{"affiliations":["West Lafayette Jr./Sr. 
High School, West Lafayette, United States"],"email":"ashleywqyang@gmail.com","is_corresponding":false,"name":"Ashley Yang"},{"affiliations":["Purdue University, WEST LAFAYETTE, United States"],"email":"wang5329@purdue.edu","is_corresponding":true,"name":"Hao Wang"},{"affiliations":["Northeastern University, Boston, United States"],"email":"yqq1960582321@gmail.com","is_corresponding":false,"name":"Qianlai Yang"},{"affiliations":["Purdue University, West Lafayette, United States"],"email":"yang2767@purdue.edu","is_corresponding":false,"name":"Qi Yang"},{"affiliations":["Purdue University, West Lafayette, United States"],"email":"gong224@purdue.edu","is_corresponding":false,"name":"Ziqian Gong"},{"affiliations":["Purdue University, West Lafayette, United States"],"email":"zhou1471@purdue.edu","is_corresponding":false,"name":"Zizun Zhou"},{"affiliations":["Purdue University, West Lafayette, United States"],"email":"qianz@purdue.edu","is_corresponding":false,"name":"Zhenyu Cheryl Qian"},{"affiliations":["Purdue University, West Lafayette, United States"],"email":"victorchen@purdue.edu","is_corresponding":false,"name":"Yingjie Victor Chen"}],"award":"","doi":"","event_id":"a-vast-challenge","event_title":"VAST Challenge","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-vast-challenge-1019","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/HhIQktS5HZs&t=1h29m9s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest2","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"VAST Challenge","session_uid":"a-vast-challenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HhIQktS5HZs&t=1h29m9s","sessions":["VAST Challenge"],"time_stamp":"2024-10-13T12:30:00Z","title":"Purdue-Chen-MC2","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-vast-challenge-1021","abstract":"In this paper we present an interactive visualization system for solving IEEE VAST Challenge 2024 Mini-Challenge 1. Our system enables interactive exploration and mining of the knowledge graph, assists in identifying suspicious bias and provides corresponding evidence from multiple perspectives. For the convenience of user exploration, our system supports recording the exploration process and preservation of evidence. 
The illustrative case proves the effectiveness of our system.","accessible_pdf":false,"authors":[{"affiliations":["Fudan University, Shanghai, China"],"email":"qiuttt@foxmail.com","is_corresponding":true,"name":"Tian Qiu"},{"affiliations":["Fudan University, Shanghai, China"],"email":"20302010026@fudan.edu.cn","is_corresponding":false,"name":"Yi Shan"},{"affiliations":["Fudan University, Shanghai, China"],"email":"3504936154@qq.com","is_corresponding":false,"name":"Xueli Shu"},{"affiliations":["Fudan University, Shanghai, China"],"email":"philipethanzg@gmail.com","is_corresponding":false,"name":"Aolin Guo"},{"affiliations":["Fudan University, Shanghai, China"],"email":"18812571619@163.com","is_corresponding":false,"name":"Qianhui Li"},{"affiliations":["school of data science, Shanghai , China"],"email":"guomeng200210@163.com","is_corresponding":false,"name":"Meng Guo"},{"affiliations":["Fudan University, Shanghai, China"],"email":"simingchen3@gmail.com","is_corresponding":false,"name":"Siming Chen"}],"award":"","doi":"","event_id":"a-vast-challenge","event_title":"VAST Challenge","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-vast-challenge-1021","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/HhIQktS5HZs&t=0h44m42s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest2","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"VAST Challenge","session_uid":"a-vast-challenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HhIQktS5HZs&t=0h44m42s","sessions":["VAST Challenge"],"time_stamp":"2024-10-13T12:30:00Z","title":"FishEye Watcher: a visual analytics system for knowledge graph bias detection","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-vast-challenge-1023","abstract":"To solve the 2024 VAST Challenge MC3, we use PageRank and different filtering techniques to select nodes or components of interest. We then use TimeArc, a data visualization technique to visualize the evolution of the corporate structure of these nodes and serve as a tool to investigate and confirm this suspicious behavior. We used these techniques to investigate many nodes including the given SouthSeafood Express Corp that was involved in illegal activity. 
We discovered a few key features associated with anomalous nodes such as instances of founding shell companies and large power transfers.","accessible_pdf":false,"authors":[{"affiliations":["Texas Tech University, Lubbock, United States"],"email":"ewei341@gmail.com","is_corresponding":false,"name":"Ethan Wei"},{"affiliations":["Texas Tech Univeristy, Lubbock, United States"],"email":"tnhondan@gmail.com","is_corresponding":false,"name":"Tommy Dang"}],"award":"","doi":"","event_id":"a-vast-challenge","event_title":"VAST Challenge","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-vast-challenge-1023","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest2","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"VAST Challenge","session_uid":"a-vast-challenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VAST Challenge"],"time_stamp":"2024-10-13T12:30:00Z","title":"Prerecorded video (VAST Challenge submission ID 1024)","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-vast-challenge-1028","abstract":"Identifying unreliable sources is crucial for preventing misinformation and making informed decisions. CatchNet, the Oceanus Knowledge Graph, contains biased perspectives that threaten its credibility. We use Large Language Models (LLMs) and interactive visualization systems to identify these biases. By analyzing police reports and using GPT-3.5 to extract information from articles, we establish the ground truth for our analysis. 
Our visual analytics system detects anomalies, revealing unreliable news sources such as The News Buoy and biased analysts such as Harvey Janus and Junior Shurdlu.","accessible_pdf":false,"authors":[{"affiliations":["Funda\u00e7\u00e3o Getulio Vargas, Rio de Janeiro, Brazil","Funda\u00e7\u00e3o Getulio Vargas, Rio de Janeiro, Brazil"],"email":"dany.diaz@ucsp.edu.pe","is_corresponding":true,"name":"Dany Mauro Diaz Espino"},{"affiliations":["FGV, Rio de Janeiro, Brazil","FGV, Rio de Janeiro, Brazil"],"email":"felipe.moreno.vera@gmail.com","is_corresponding":false,"name":"Felipe Moreno-Vera"},{"affiliations":["Getulio Vargas Foundation, Rio de Janeiro, Brazil","Getulio Vargas Foundation, Rio de Janeiro, Brazil"],"email":"jherediaparillo@gmail.com","is_corresponding":false,"name":"Juanpablo Andrew Heredia"},{"affiliations":["Getulio Vargas Foundation, Rio de Janeiro, Brazil","Getulio Vargas Foundation, Rio de Janeiro, Brazil"],"email":"fabricio.venturim@fgv.edu.br","is_corresponding":false,"name":"Fabr\u00edcio Venturim"},{"affiliations":["Get\u00falio Vargas Foundation, Rio de Janeiro, Brazil","Get\u00falio Vargas Foundation, Rio de Janeiro, Brazil"],"email":"jpocom@gmail.com","is_corresponding":false,"name":"Jorge Poco"}],"award":"","doi":"","event_id":"a-vast-challenge","event_title":"VAST Challenge","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-vast-challenge-1028","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/HhIQktS5HZs&t=0h8m24s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest2","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"VAST Challenge","session_uid":"a-vast-challenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HhIQktS5HZs&t=0h8m24s","sessions":["VAST Challenge"],"time_stamp":"2024-10-13T12:30:00Z","title":"FishBiasLens: Integrating Large Language Models and Visual Analytics for Bias Detection","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-vast-challenge-1030","abstract":"This paper presents a visual analytics system designed to address the IEEE VAST Challenge 2024 Mini-Challenge 2. The system can support the matching and anomaly detection of multi-source heterogeneous spatio-temporal data, thereby enabling the detection of illegal transport activities. 
The primary contribution of the system lies in its analysis-driven interaction design.","accessible_pdf":false,"authors":[{"affiliations":["Fudan University, Shanghai, China"],"email":"20302010026@fudan.edu.cn","is_corresponding":true,"name":"Yi Shan"},{"affiliations":["Fudan University, Shanghai, China"],"email":"philipethanzg@gmail.com","is_corresponding":false,"name":"Aolin Guo"},{"affiliations":["Fudan University, Shanghai, China"],"email":"gemini25szk@gmail.com","is_corresponding":false,"name":"Zekai Shao"},{"affiliations":["Fudan University, Shanghai, China"],"email":"qiuttt@foxmail.com","is_corresponding":false,"name":"Tian Qiu"},{"affiliations":["Fudan University, Shanghai, China"],"email":"3504936154@qq.com","is_corresponding":false,"name":"Xueli Shu"},{"affiliations":["Fudan University, Shanghai, China"],"email":"18812571619@163.com","is_corresponding":false,"name":"Qianhui Li"},{"affiliations":["Fudan University, Shanghai, China"],"email":"simingchen3@gmail.com","is_corresponding":false,"name":"Siming Chen"}],"award":"","doi":"","event_id":"a-vast-challenge","event_title":"VAST Challenge","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-vast-challenge-1030","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/HhIQktS5HZs&t=1h3m39s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest2","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"VAST Challenge","session_uid":"a-vast-challenge","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HhIQktS5HZs&t=1h3m39s","sessions":["VAST Challenge"],"time_stamp":"2024-10-13T12:30:00Z","title":"Visual Analytics for Detecting Illegal Transport Activities","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-scivis-contest-1","abstract":"","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Yiming Shao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Chengming Liu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Zhiyuan Meng"},{"affiliations":"","email":"","is_corresponding":false,"name":"Shufan Qian"},{"affiliations":"","email":"","is_corresponding":false,"name":"Peng Jiang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yunhai Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Dr. 
Qiong Zeng"}],"award":"","doi":"","event_id":"a-scivis-contest","event_title":"SciVis Contest","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-scivis-contest-1","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/paQRbfA0tdg&t=0h10m48s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest3","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"SciVis Contest","session_uid":"a-scivis-contest","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/paQRbfA0tdg&t=0h10m48s","sessions":["SciVis Contest"],"time_stamp":"2024-10-14T12:30:00Z","title":"PlumeViz: Interactive Exploration for Multi-Facet Features of Hydrothermal Plumes in Sonar Images","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-scivis-contest-2","abstract":"","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Ngan V. T. Nguyen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Minh N. A. Tran"},{"affiliations":"","email":"","is_corresponding":false,"name":"Si Chi Hoang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Vuong Tran Thien"},{"affiliations":"","email":"","is_corresponding":false,"name":"Nguyen Tran Nguyen Thanh"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ngo Ly"},{"affiliations":"","email":"","is_corresponding":false,"name":"Phuc Thien Nguyen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Sinh Huy Gip"},{"affiliations":"","email":"","is_corresponding":false,"name":"Sang Thanh Ngo"},{"affiliations":"","email":"","is_corresponding":false,"name":"Nguy\u1ec5n Th\u00e1i H\u00f2a"}],"award":"","doi":"","event_id":"a-scivis-contest","event_title":"SciVis Contest","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-scivis-contest-2","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest3","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"SciVis Contest","session_uid":"a-scivis-contest","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["SciVis Contest"],"time_stamp":"2024-10-14T12:30:00Z","title":"Visualization of Sonar Imaging for Hydrothermal Systems","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-scivis-contest-3","abstract":"","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Adhitya Kamakshidasan"},{"affiliations":"","email":"","is_corresponding":true,"name":"Harikrishnan Pattathil"}],"award":"","doi":"","event_id":"a-scivis-contest","event_title":"SciVis 
Contest","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-scivis-contest-3","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/paQRbfA0tdg&t=0h25m7s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"contest3","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"SciVis Contest","session_uid":"a-scivis-contest","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/paQRbfA0tdg&t=0h25m7s","sessions":["SciVis Contest"],"time_stamp":"2024-10-14T12:30:00Z","title":"Topology Based Visualization of Hydrothermal Plumes","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1031","abstract":"In soccer, player scouting aims to find players suitable for a team to increase the winning chance in future matches. To scout suitable players, coaches and analysts need to consider whether the players will perform well in a new team, which is hard to learn directly from their historical performances. Match simulation methods have been introduced to scout players by estimating their expected contributions to a new team. However, they usually focus on the simulation of match results and hardly support interactive analysis to navigate potential target players and compare them in fine-grained simulated behaviors. In this work, we propose a visual analytics method to assist soccer player scouting based on match simulation. We construct a two-level match simulation framework for estimating both match results and player behaviors when a player comes to a new team. Based on the framework, we develop a visual analytics system, Team-Scouter, to facilitate the simulative-based soccer player scouting process through player navigation, comparison, and investigation. With our system, coaches and analysts can find potential players suitable for the team and compare them on historical and expected performances. For an in-depth investigation of the players' expected performances, the system provides a visual comparison between the simulated behaviors of the player and the actual ones. 
The usefulness and effectiveness of the system are demonstrated by two case studies on a real-world dataset and an expert interview.","accessible_pdf":false,"authors":[{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"caoanqi28@163.com","is_corresponding":true,"name":"Anqi Cao"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"xxie@zju.edu.cn","is_corresponding":false,"name":"Xiao Xie"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"2366385033@qq.com","is_corresponding":false,"name":"Runjin Zhang"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"1282533692@qq.com","is_corresponding":false,"name":"Yuxin Tian"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"fanmu_032@zju.edu.cn","is_corresponding":false,"name":"Mu Fan"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"zhang_hui@zju.edu.cn","is_corresponding":false,"name":"Hui Zhang"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"ycwu@zju.edu.cn","is_corresponding":false,"name":"Yingcai Wu"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1031","image_caption":"System user interface. The interface contains two views: a navigation view (A) and an investigation view (B). The navigation view consists of a squad board (A1) to navigate players will be replaced and a player ranking list (A2) to compare players by personal information and performances. The investigation view includes an on-ball tactic list (B1) for exploring essential on-ball tactics, a player record list (B2) to compare players' simulated actions under a certain on-ball tactic, and a simulated action map (B3) to display players' detailed simulated actions.","keywords":["Soccer Visualization, Player Scouting, Design Study"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/SOrXiceBb2g&t=0h0m6s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1031/v-full-1031_Preview.mp4?token=lZJeB6Xf0ge_YdGbBTun3RHwlGjYyPFmJbqOYHb7olQ&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1031/v-full-1031_Preview.srt?token=ioob5IkjvKKKvgqxCtdJinqPHLJNypauW9vO7Yg4uxw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Sports. Games, and Finance","session_uid":"v-full","session_youtube_ff_id":"p07D01bK_fs","session_youtube_ff_link":"https://youtu.be/p07D01bK_fs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/SOrXiceBb2g&t=0h0m6s","sessions":["Applications: Sports. Games, and Finance"],"time_stamp":"2024-10-17T14:15:00Z","title":"Team-Scouter: Simulative Visual Analytics of Soccer Player Scouting","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1099","abstract":"Tactics play an important role in team sports by guiding how players interact on the field. Both sports fans and experts have a demand for analyzing sports tactics. Existing approaches allow users to visually perceive the multivariate tactical effects. 
However, these approaches require users to experience a complex reasoning process to connect the multiple interactions within each tactic to the final tactical effect. In this work, we collaborate with basketball experts and propose a progressive approach to help users gain a deeper understanding of how each tactic works and customize tactics on demand. Users can progressively sketch on a tactic board, and a coach agent will simulate the possible actions in each step and present the simulation to users with facet visualizations. We develop an extensible framework that integrates large language models (LLMs) and visualizations to help users communicate with the coach agent with multimodal inputs. Based on the framework, we design and develop Smartboard, an agent-based interactive visualization system for fine-grained tactical analysis, especially for play design. Smartboard provides users with a structured process of setup, simulation, and evolution, allowing for iterative exploration of tactics based on specific personalized scenarios. We conduct case studies based on real-world basketball datasets to demonstrate the effectiveness and usefulness of our system.","accessible_pdf":false,"authors":[{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"ziao_liu@outlook.com","is_corresponding":true,"name":"Ziao Liu"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"xxie@zju.edu.cn","is_corresponding":false,"name":"Xiao Xie"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"3170101799@zju.edu.cn","is_corresponding":false,"name":"Moqi He"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"zhao_ws@zju.edu.cn","is_corresponding":false,"name":"Wenshuo Zhao"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"wuyihong0606@gmail.com","is_corresponding":false,"name":"Yihong Wu"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"lycheecheng@zju.edu.cn","is_corresponding":false,"name":"Liqi Cheng"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"zhang_hui@zju.edu.cn","is_corresponding":false,"name":"Hui Zhang"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"ycwu@zju.edu.cn","is_corresponding":false,"name":"Yingcai Wu"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1099","image_caption":"The system interface of Smartboard. (A) The chat view provides system feedback and enhances communication between users and the system through tag selections and open-question answering. (B) The setup view provides interactions during tactical setup with tactics sketching, matchup analysis, and situation retrieval. (C) The simulation view presents the coach agent's recommended tactics, along with explanations and evaluations in both overview and detail. 
(D) The history view records users' tactics and provides the classic tactics for starting exploration.","keywords":["Sports visualization, tactic board, tactical analysis"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/SOrXiceBb2g&t=0h25m16s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1099/v-full-1099_Preview.mp4?token=EIieXd4BF-80sryrgj0510wUCzzsyFdEgrbI1J5DqPY&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Sports. Games, and Finance","session_uid":"v-full","session_youtube_ff_id":"LQ89KZHc_uY","session_youtube_ff_link":"https://youtu.be/LQ89KZHc_uY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/SOrXiceBb2g&t=0h25m16s","sessions":["Applications: Sports. Games, and Finance"],"time_stamp":"2024-10-17T14:39:00Z","title":"Smartboard: Visual Exploration of Team Tactics with LLM Agent","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1351","abstract":"As basketball\u2019s popularity surges, fans often find themselves confused and overwhelmed by the rapid game pace and complexity. Basketball tactics, involving a complex series of actions, require substantial knowledge to be fully understood. This complexity leads to a need for additional information and explanation, which can distract fans from the game. To tackle these challenges, we present Sportify, a Visual Question Answering system that integrates narratives and embedded visualization for demystifying basketball tactical questions, aiding fans in understanding various game aspects. We propose three novel action visualizations (i.e., Pass, Cut, and Screen) to demonstrate critical action sequences. To explain the reasoning and logic behind players\u2019 actions, we leverage a large-language model (LLM) to generate narratives. We adopt a storytelling approach for complex scenarios from both first and third-person perspectives, integrating action visualizations. We evaluated Sportify with basketball fans to investigate its impact on understanding of tactics, and how different personal perspectives of narratives impact the understanding of complex tactic with action visualizations. Our evaluation with basketball fans demonstrates Sportify\u2019s capability to deepen tactical insights and amplify the viewing experience. 
Furthermore, third-person narration assists people in getting in-depth game explanations while first-person narration enhances fans\u2019 game engagement.","accessible_pdf":true,"authors":[{"affiliations":["Harvard University, Allston, United States"],"email":"chungyi347@gmail.com","is_corresponding":true,"name":"Chunggi Lee"},{"affiliations":["Harvard University, Cambridge, United States"],"email":"mlin@g.harvard.edu","is_corresponding":false,"name":"Tica Lin"},{"affiliations":["Harvard University, Cambridge, United States"],"email":"pfister@seas.harvard.edu","is_corresponding":false,"name":"Hanspeter Pfister"},{"affiliations":["University of Minnesota-Twin Cities, Minneapolis, United States"],"email":"ztchen@umn.edu","is_corresponding":false,"name":"Chen Zhu-Tian"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1351","image_caption":"Sportify explains tactic questions in each clip for everyone, aiming to engage users and foster a love for sports. We integrate embedded visualization and personified narratives generated by large language model (LLM) to elucidate a complex series of actions through action detection, tactic classifier, and LLM pipelines.","keywords":["Embedded Visualization, Narrative and storytelling, Basketball tactic, Question-answering (QA) system"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/SOrXiceBb2g&t=0h12m34s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1351/v-full-1351_Preview.mp4?token=bIGHDjLkrgJQpolXS_FEYprMGOPhEwvYQLjlBgRYK6Y&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1351/v-full-1351_Preview.srt?token=L7wsQyu7e4q3byPVBl_dcO_CathD-9_Gwi7yGt8AKvk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Sports. Games, and Finance","session_uid":"v-full","session_youtube_ff_id":"IZil979U9UQ","session_youtube_ff_link":"https://youtu.be/IZil979U9UQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/SOrXiceBb2g&t=0h12m34s","sessions":["Applications: Sports. Games, and Finance"],"time_stamp":"2024-10-17T14:27:00Z","title":"Sportify: Question Answering with Embedded Visualizations and Personified Narratives for Sports Video","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1571","abstract":"Effective security patrol management is critical for ensuring safety in diverse environments such as art galleries, airports, and factories. The behavior of patrols in these situations can be modeled by patrolling games. They simulate the behavior of the patrol and adversary in the building, which is modeled as a graph of interconnected nodes representing rooms. The designers of algorithms solving the game face the problem of analyzing complex graph layouts with temporal dependencies. Therefore, appropriate visual support is crucial for them to work effectively. 
In this paper, we present a novel tool that helps the designers of patrolling games explore the outcomes of the proposed algorithms and approaches, evaluate their success rate, and propose modifications that can improve their solutions. Our tool offers an intuitive and interactive interface, featuring a detailed exploration of patrol routes and probabilities of taking them, simulation of patrols, and other requested features. In close collaboration with experts in designing patrolling games, we conducted three case studies demonstrating the usage and usefulness of our tool. The prototype of the tool, along with exemplary datasets, is available at https://gitlab.fi.muni.cz/formela/strategy-vizualizer.","accessible_pdf":false,"authors":[{"affiliations":["Masaryk University, Brno, Czech Republic"],"email":"langm@mail.muni.cz","is_corresponding":true,"name":"Mat\u011bj Lang"},{"affiliations":["Masaryk University, Brno, Czech Republic"],"email":"469242@mail.muni.cz","is_corresponding":false,"name":"Adam \u0160t\u011bp\u00e1nek"},{"affiliations":["Faculty of Informatics, Masaryk University, Brno, Czech Republic"],"email":"514179@mail.muni.cz","is_corresponding":false,"name":"R\u00f3bert Zvara"},{"affiliations":["Faculty of Informatics, Masaryk University, Brno, Czech Republic"],"email":"rehak@fi.muni.cz","is_corresponding":false,"name":"Vojt\u011bch \u0158eh\u00e1k"},{"affiliations":["Masaryk University, Brno, Czech Republic"],"email":"kozlikova@fi.muni.cz","is_corresponding":false,"name":"Barbora Kozlikova"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1571","image_caption":"The screen of the visualization tool, featuring a Markov chain representing a patroller's strategy. On the left, there is a transition matrix providing an alternative view of the Markov chain. On the right, there is a bar chart showing the probability distribution in time of the patroller's presence.","keywords":["Patrolling Games, Strategy, Graph, Heatmap, Visual Analysis"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/SOrXiceBb2g&t=1h2m25s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1571/v-full-1571_Preview.mp4?token=f8B5YPqvr32ERZQcdM_iNJ8vQlq0Lo2mxVfog7Id2s8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1571/v-full-1571_Preview.srt?token=3NQFm8yUvCvIu1-6NfSH5ehXGG9kmvr4XDrckdLdxoM&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Sports. Games, and Finance","session_uid":"v-full","session_youtube_ff_id":"BgFsC5T5ILM","session_youtube_ff_link":"https://youtu.be/BgFsC5T5ILM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/SOrXiceBb2g&t=1h2m25s","sessions":["Applications: Sports. 
Games, and Finance"],"time_stamp":"2024-10-17T15:15:00Z","title":"Who Let the Guards Out: Visual Support for Patrolling Games","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243394745","abstract":"The fund investment industry heavily relies on the expertise of fund managers, who bear the responsibility of managing portfolios on behalf of clients. With their investment knowledge and professional skills, fund managers gain a competitive advantage over the average investor in the market. Consequently, investors prefer entrusting their investments to fund managers rather than directly investing in funds. For these investors, the primary concern is selecting a suitable fund manager. While previous studies have employed quantitative or qualitative methods to analyze various aspects of fund managers, such as performance metrics, personal characteristics, and performance persistence, they often face challenges when dealing with a large candidate space. Moreover, distinguishing whether a fund manager's performance stems from skill or luck poses a challenge, making it difficult to align with investors' preferences in the selection process. To address these challenges, this study characterizes the requirements of investors in selecting suitable fund managers and proposes an interactive visual analytics system called FMLens. This system streamlines the fund manager selection process, allowing investors to efficiently assess and deconstruct fund managers' investment styles and abilities across multiple dimensions. Additionally, the system empowers investors to scrutinize and compare fund managers' performances. The effectiveness of the approach is demonstrated through two case studies and a qualitative user study. Feedback from domain experts indicates that the system excels in analyzing fund managers from diverse perspectives, enhancing the efficiency of fund manager evaluation and selection.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Longfei Chen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Chen Cheng"},{"affiliations":"","email":"","is_corresponding":false,"name":"He Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Xiyuan Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yun Tian"},{"affiliations":"","email":"","is_corresponding":false,"name":"Xuanwu Yue"},{"affiliations":"","email":"","is_corresponding":false,"name":"Wong Kam-Kwai"},{"affiliations":"","email":"","is_corresponding":false,"name":"Haipeng Zhang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Suting Hong"},{"affiliations":"","email":"","is_corresponding":false,"name":"Quan Li"}],"award":"","doi":"10.1109/TVCG.2024.3394745","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243394745","image_caption":"FMLens consists of four views: (A) The FM Overview serves as a summary of the fund manager candidate space. (B) The Ranking View facilitates the examination of fund managers' performance evolution and supports interactive ranking. (C) The Historical Management View provides a comprehensive review of fund managers' management records. 
(D) The Comparison View is crafted to facilitate the comparison of fund performance among one or more fund managers.","keywords":["Financial Data, Fund Manager Selection, Visual Analytics"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/SOrXiceBb2g&t=0h37m42s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243394745/v-tvcg-20243394745_Preview.mp4?token=S_DhSMZGe1pa7MrGSlMaB6fb6AOeVwyJ8Vq6x2taTpw&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243394745/v-tvcg-20243394745_Preview.srt?token=5bL8lQRkexpMVVbZFfcuYdKEFLNpR__BUaDOBP2BXqY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Sports. Games, and Finance","session_uid":"v-tvcg","session_youtube_ff_id":"AK2XOfpvC6o","session_youtube_ff_link":"https://youtu.be/AK2XOfpvC6o","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/SOrXiceBb2g&t=0h37m42s","sessions":["Applications: Sports. Games, and Finance"],"time_stamp":"2024-10-17T14:51:00Z","title":"FMLens: Towards Better Scaffolding the Process of Fund Manager Selection in Fund Investments","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243402834","abstract":"Impact dynamics are crucial for estimating the growth patterns of NFT projects by tracking the diffusion and decay of their relative appeal among stakeholders. Machine learning methods for impact dynamics analysis are incomprehensible and rigid in terms of their interpretability and transparency, whilst stakeholders require interactive tools for informed decision-making. Nevertheless, developing such a tool is challenging due to the substantial, heterogeneous NFT transaction data and the requirements for flexible, customized interactions. To this end, we integrate intuitive visualizations to unveil the impact dynamics of NFT projects. We first conduct a formative study and summarize analysis criteria, including substitution mechanisms, impact attributes, and design requirements from stakeholders. Next, we propose the Minimal Substitution Model to simulate substitutive systems of NFT projects that can be feasibly represented as node-link graphs. Particularly, we utilize attribute-aware techniques to embed the project status and stakeholder behaviors in the layout design. Accordingly, we develop a multi-view visual analytics system, namely NFTracer, allowing interactive analysis of impact dynamics in NFT transactions. We demonstrate the informativeness, effectiveness, and usability of NFTracer by performing two case studies with domain experts and one user study with stakeholders. The studies suggest that NFT projects featuring a higher degree of similarity are more likely to substitute each other. 
The impact of NFT projects within substitutive systems is contingent upon the degree of stakeholders\u2019 influx and projects\u2019 freshness.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Yifan Cao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Qing Shi"},{"affiliations":"","email":"","is_corresponding":false,"name":"Lucas Shen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Kani Chen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yang Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Wei Zeng"},{"affiliations":"","email":"","is_corresponding":false,"name":"Huamin Qu"}],"award":"","doi":"10.1109/TVCG.2024.3402834","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243402834","image_caption":"**Figure 1:** Understanding the evolving appeal of NFT projects requires analyzing impact dynamics. NFTracer tackles this challenge with a multi-view visual analytics system, addressing limitations of existing machine learning methods. The interface offers four distinct views: (A) Propensity Analysis, (B) Mechanisms Analysis, (C) Substitution View, and (D) Impact Dynamic View. This example visualizes the multifaceted stakeholder flow (MSF) between CryptoPunks and Cool Cats, revealing co-occurring stakeholders (D1-3) and the temporal evolution of their impact dynamics (D4) through NFTracer's analytical capabilities.","keywords":["Stakeholders, Nonfungible Tokens, Social Networking Online, Visual Analytics, Network Analyzers, Measurement, Layout, Impact Dynamics Analysis, Non Fungible Tokens NF Ts, NFT Transaction Data, Substitutive Systems, Visual Analytics"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://doi.org/10.48550/arXiv.2409.15754","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/SOrXiceBb2g&t=0h50m58s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243402834/v-tvcg-20243402834_Preview.mp4?token=p2neHggXdErinSRXo2Xkd7ZOVpBAATdOPM_U9-LNycc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243402834/v-tvcg-20243402834_Preview.srt?token=DfO05-24gws9c8vL_kEeZJVzz3xWDOx0Nz9TyRq-lug&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full1","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Sports. Games, and Finance","session_uid":"v-tvcg","session_youtube_ff_id":"00yRDSY-1Kk","session_youtube_ff_link":"https://youtu.be/00yRDSY-1Kk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/SOrXiceBb2g&t=0h50m58s","sessions":["Applications: Sports. Games, and Finance"],"time_stamp":"2024-10-17T15:03:00Z","title":"Tracing NFT Impact Dynamics in Transaction-flow Substitutive Systems with Visual Analytics","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1290","abstract":"Visualization linters are end-user facing evaluators that automatically identify potential chart issues. These spell-checker like systems offer a blend of interpretability and customization that is not found in other forms of automated assistance. 
However, existing linters do not model context and have primarily targeted users who do not need assistance, resulting in obvious---even annoying---advice. We investigate these issues within the domain of color palette design, which serves as a microcosm of visualization design concerns. We contribute a GUI-based color palette linter as a design probe that covers perception, accessibility, context, and other design criteria, and use it to explore visual explanations, integrated fixes, and user defined linting rules. Through a formative interview study and theory-driven analysis, we find that linters can be meaningfully integrated into graphical contexts, thereby addressing many of their core issues. We discuss implications for integrating linters into visualization tools, developing improved assertion languages, and supporting end-user tunable advice---all laying the groundwork for more effective visualization linters in any context.","accessible_pdf":true,"authors":[{"affiliations":["University of Washington, Seattle, United States","University of Utah, Salt Lake City, United States"],"email":"mcnutt.andrew@gmail.com","is_corresponding":true,"name":"Andrew M McNutt"},{"affiliations":["University of Washington, Seattle, United States"],"email":"maureen.stone@gmail.com","is_corresponding":false,"name":"Maureen Stone"},{"affiliations":["University of Washington, Seattle, United States"],"email":"jheer@uw.edu","is_corresponding":false,"name":"Jeffrey Heer"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1290","image_caption":"How do you know when what you\u2019ve done is right? Visualization linters provide concrete feedback about chart designs, but so far they have had interface issues that have limited their usefulness. 
This work introduces a linter (PaletteLint) for color palettes (and a GUI called Color Buddy, pictured here) that explores ways to deal with these issues.","keywords":["Linters, Color Palette Design, Design Probe, Reflection"],"open_access_supplemental_link":"https://osf.io/geauf","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.21285","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/yBF6qqK_ASs&t=0h12m6s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1290/v-full-1290_Preview.mp4?token=NkJBbD0Gt1xNNy93YGu-u2bvj6CkT8d0wcJdXjEMsHY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1290/v-full-1290_Preview.srt?token=mgzxzUfCmwcweO_V8-7UDRSbREJDumR1vwvE7SA5do0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full10","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Designing Palettes and Encodings","session_uid":"v-full","session_youtube_ff_id":"CY7ycxWmLkw","session_youtube_ff_link":"https://youtu.be/CY7ycxWmLkw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/yBF6qqK_ASs&t=0h12m6s","sessions":["Designing Palettes and Encodings"],"time_stamp":"2024-10-16T17:57:00Z","title":"Mixing Linters with GUIs: A Color Palette Design Probe","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1595","abstract":"Assigning discriminable and harmonic colors to samples according to their class labels and spatial distribution can generate attractive visualizations and facilitate data exploration. However, as the number of classes increases, it is challenging to generate a high-quality color assignment result that accommodates all classes simultaneously. A practical solution is to organize classes into a hierarchy and then dynamically assign colors during exploration. However, existing color assignment methods fall short in generating high-quality color assignment results and dynamically aligning them with hierarchical structures. To address this issue, we develop a dynamic color assignment method for hierarchical data, which is formulated as a multi-objective optimization problem. This method simultaneously considers color discriminability, color harmony, and spatial distribution at each hierarchical level. By using the colors of parent classes to guide the color assignment of their child classes, our method further promotes both consistency and clarity across hierarchical levels. 
We demonstrate the effectiveness of our method in generating dynamic color assignment results with quantitative experiments and a user study.","accessible_pdf":false,"authors":[{"affiliations":["Tsinghua University, Beijing, China"],"email":"jiashu0717c@gmail.com","is_corresponding":false,"name":"Jiashu Chen"},{"affiliations":["Tsinghua University, Beijing, China"],"email":"vicayang496@gmail.com","is_corresponding":true,"name":"Weikai Yang"},{"affiliations":["Tsinghua University, Beijing, China"],"email":"jiazl22@mails.tsinghua.edu.cn","is_corresponding":false,"name":"Zelin Jia"},{"affiliations":["Tsinghua University, Beijing, China"],"email":"tarolancy@gmail.com","is_corresponding":false,"name":"Lanxi Xiao"},{"affiliations":["Tsinghua University, Beijing, China"],"email":"shixia@tsinghua.edu.cn","is_corresponding":false,"name":"Shixia Liu"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1595","image_caption":"Based on user exploration, our method dynamically selects the color range and assigns colors to classes within the range, which ensures high discriminability and harmony at each level and maintains consistency across different levels.","keywords":["Color assignment, Hierarchical Visualization, Discriminability, Harmony."],"open_access_supplemental_link":"https://osf.io/e4b5u/?view_only=68cc67c194c443b498bd2545ef551faa","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.14742","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/yBF6qqK_ASs&t=0h24m59s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1595/v-full-1595_Preview.mp4?token=toj6GxPoOhr1jLn6j9nD71SzjSzQTxHw6k3YcrcXjAo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1595/v-full-1595_Preview.srt?token=jIle7eUQz-qE5I07DktS3QVaM3zuo11YTABZfXIlcjc&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full10","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Designing Palettes and Encodings","session_uid":"v-full","session_youtube_ff_id":"RjtAd4XmMsU","session_youtube_ff_link":"https://youtu.be/RjtAd4XmMsU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/yBF6qqK_ASs&t=0h24m59s","sessions":["Designing Palettes and Encodings"],"time_stamp":"2024-10-16T18:09:00Z","title":"Dynamic Color Assignment for Hierarchical Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1836","abstract":"Shape is commonly used to distinguish between categories in multi-class scatterplots. However, existing guidelines for choosing effective shape palettes rely largely on intuition and do not consider how these needs may change as the number of categories increases. Unlike color, shapes can not be represented by a numerical space, making it difficult to propose general guidelines or design heuristics for using shape effectively. This paper presents a series of four experiments evaluating the efficiency of 39 shapes across three tasks: relative mean judgment tasks, expert preference, and correlation estimation. 
Our results show that conventional means for reasoning about shapes, such as filled versus unfilled, are insufficient to inform effective palette design. Further, even expert palettes vary significantly in their use of shape and corresponding effectiveness. To support effective shape palette design, we developed a model based on pairwise relations between shapes in our experiments and the number of shapes required for a given design. We embed this model in a palette design tool to give designers agency over shape selection while incorporating empirical elements of perceptual performance captured in our study. Our model advances understanding of shape perception in visualization contexts and provides practical design guidelines that can help improve categorical data encodings. ","accessible_pdf":false,"authors":[{"affiliations":["University of North Carolina-Chapel Hill, Chapel Hill, United States"],"email":"chint@cs.unc.edu","is_corresponding":true,"name":"Chin Tseng"},{"affiliations":["University of North Carolina-Chapel Hill, Chapel Hill, United States"],"email":"zeyuwang@cs.unc.edu","is_corresponding":false,"name":"Arran Zeyu Wang"},{"affiliations":["University of Oklahoma, Norman, United States"],"email":"quadri@ou.edu","is_corresponding":false,"name":"Ghulam Jilani Quadri"},{"affiliations":["University of North Carolina-Chapel Hill, Chapel Hill, United States"],"email":"danielle.szafir@cs.unc.edu","is_corresponding":false,"name":"Danielle Albers Szafir"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1836","image_caption":"We present a web-based shape recommendation tool based on our empirical studies. Users can input their target category number and preferred shape, and the tool will provide a shape palette based on a pairwise distance model between shapes generated using our experimental results. The output shape palette can also be modified by swapping out certain shapes, which the system will replace using data-driven recommendations. 
","keywords":["Categorical perception, shape perception, multiclass scatterplots, visualization effectiveness, quantitative study"],"open_access_supplemental_link":"https://osf.io/5k47c/?view_only=52e6b52f69b84ceab8c8c1b897083fc3","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/yBF6qqK_ASs&t=0h36m40s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1836/v-full-1836_Preview.mp4?token=xxn-m06rWMTDUsTGJC38ewDLCvtsHQOwHMkSA2HSmDA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1836/v-full-1836_Preview.srt?token=rAB3h0TDWwRdw_o0wSQ1-fHqwgOwkRj6JvdCsF3r78Q&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full10","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Designing Palettes and Encodings","session_uid":"v-full","session_youtube_ff_id":"SSB0MEkju-s","session_youtube_ff_link":"https://youtu.be/SSB0MEkju-s","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/yBF6qqK_ASs&t=0h36m40s","sessions":["Designing Palettes and Encodings"],"time_stamp":"2024-10-16T18:21:00Z","title":"An Empirically Grounded Approach for Designing Shape Palettes","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233275925","abstract":"A contiguous area cartogram is a geographic map in which the area of each region is proportional to numerical data (e.g., population size) while keeping neighboring regions connected. In this study, we investigated whether value-to-area legends (square symbols next to the values represented by the squares' areas) and grid lines aid map readers in making better area judgments. We conducted an experiment to determine the accuracy, speed, and confidence with which readers infer numerical data values for the mapped regions. We found that, when only informed about the total numerical value represented by the whole cartogram without any legend, the distribution of estimates for individual regions was centered near the true value with substantial spread. Legends with grid lines significantly reduced the spread but led to a tendency to underestimate the values. Comparing differences between regions or between cartograms revealed that legends and grid lines slowed the estimation without improving accuracy. However, participants were more likely to complete the tasks when legends and grid lines were present, particularly when the area units represented by these features could be interactively selected. We recommend considering the cartogram's use case and purpose before deciding whether to include grid lines or an interactive legend.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Kelvin L. T. Fung"},{"affiliations":"","email":"","is_corresponding":false,"name":"Simon T. Perrault"},{"affiliations":"","email":"","is_corresponding":false,"name":"Michael T. Gastner"}],"award":"","doi":"10.1109/TVCG.2023.3275925","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233275925","image_caption":"Area cartograms resize regions based on data like population or GDP. Our user study evaluated whether legends and grid lines help readers estimate these values accurately. 
We found that legends and grid lines improve consistency and task completion but slow down estimation. Our findings suggest practical consideration of these features in cartogram design.","keywords":["Task Analysis, Symbols, Data Visualization, Sociology, Visualization, Switches, Mice, Cartogram, Geovisualization, Interactive Data Exploration, Quantitative Evaluation"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/yBF6qqK_ASs&t=0h50m10s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233275925/v-tvcg-20233275925_Preview.mp4?token=BulF51oG-kLnA-qi6h8E5DjKPUTmmjELljvCv1OAGGA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233275925/v-tvcg-20233275925_Preview.srt?token=j5V-AYjzUCUA03WOMhzvlbCUQ42tOZkPPmqHSHwrGK8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full10","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Designing Palettes and Encodings","session_uid":"v-tvcg","session_youtube_ff_id":"lDlvZRQYPwU","session_youtube_ff_link":"https://youtu.be/lDlvZRQYPwU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/yBF6qqK_ASs&t=0h50m10s","sessions":["Designing Palettes and Encodings"],"time_stamp":"2024-10-16T18:33:00Z","title":"Effectiveness of Area-to-Value Legends and Grid Lines in Contiguous Area Cartograms","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233289292","abstract":"Reading a visualization is like reading a paragraph. Each sentence is a comparison: the mean of these is higher than those; this difference is smaller than that. What determines which comparisons are made first? The viewer's goals and expertise matter, but the way that values are visually grouped together within the chart also impacts those comparisons. Research from psychology suggests that comparisons involve multiple steps. First, the viewer divides the visualization into a set of units. This might include a single bar or a grouped set of bars. Then the viewer selects and compares two of these units, perhaps noting that one pair of bars is longer than another. Viewers might take an additional third step and perform a second-order comparison, perhaps determining that the difference between one pair of bars is greater than the difference between another pair. We create a visual comparison taxonomy that allows us to develop and test a sequence of hypotheses about which comparisons people are more likely to make when reading a visualization. We find that people tend to compare two groups before comparing two individual bars and that second-order comparisons are rare. Visual cues like spatial proximity and color can influence which elements are grouped together and selected for comparison, with spatial proximity being a stronger grouping cue. 
Interestingly, once the viewer grouped together and compared a set of bars, regardless of whether the group is formed by spatial proximity or color similarity, they no longer consider other possible groupings in their comparisons.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Cindy Xiong Bearfield"},{"affiliations":"","email":"","is_corresponding":false,"name":"Chase Stokes"},{"affiliations":"","email":"","is_corresponding":false,"name":"Andrew Lovett"},{"affiliations":"","email":"","is_corresponding":false,"name":"Steven Franconeri"}],"award":"","doi":"10.1109/TVCG.2023.3289292","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233289292","image_caption":"When designing simple bar charts depicting the revenue of two companies A and B in two regions East and West, one can group the bars spatially by company such that West A and East A are closer together, and West B and East B are close together. One can also add color to the bars, such as coloring the two A bars the same color, and the two B bars the same color. We compared the spatial proximity cue against the color cue, and found people to prioritize the spatial proximity cue when making comparisons. That is, they are more likely to group bars that are next to each other, even if they have different colors, to be compared to bars further away. They are less likely to group bars that are further away from each other even if they have the same color.","keywords":["comparison, perception, visual grouping, bar charts, verbal conclusions."],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/yBF6qqK_ASs&t=1h0m57s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233289292/v-tvcg-20233289292_Preview.mp4?token=FioY-BPSr_2MbWrgkD2XLq8GLNsMALBaddZ05hvtkfI&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full10","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Designing Palettes and Encodings","session_uid":"v-tvcg","session_youtube_ff_id":"khn38dy2CQk","session_youtube_ff_link":"https://youtu.be/khn38dy2CQk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/yBF6qqK_ASs&t=1h0m57s","sessions":["Designing Palettes and Encodings"],"time_stamp":"2024-10-16T18:45:00Z","title":"What Does the Chart Say? Grouping Cues Guide Viewer Comparisons and Conclusions in Bar Charts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233322372","abstract":"Visualization linting is a proven effective tool in assisting users to follow established visualization guidelines. Despite its success, visualization linting for choropleth maps, one of the most popular visualizations on the internet, has yet to be investigated. In this paper, we present GeoLinter, a linting framework for choropleth maps that assists in creating accurate and robust maps. 
Based on a set of design guidelines and metrics drawing upon a collection of best practices from the cartographic literature, GeoLinter detects potentially suboptimal design decisions and provides further recommendations on design improvement with explanations at each step of the design process. We perform a validation study to evaluate the proposed framework's functionality with respect to identifying and fixing errors and apply its results to improve the robustness of GeoLinter. Finally, we demonstrate the effectiveness of the GeoLinter - validated through empirical studies - by applying it to a series of case studies using real-world datasets.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Fan Lei"},{"affiliations":"","email":"","is_corresponding":false,"name":"Arlen Fan"},{"affiliations":"","email":"","is_corresponding":false,"name":"Alan M. MacEachren"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ross Maciejewski"}],"award":"","doi":"10.1109/TVCG.2023.3322372","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233322372","image_caption":"The GeoLinter Interface: (A) the VegaLite code editor; (B) the original map; (C) the map after applying soft fixes; (D) classification recommendations; (E) detected violations with guides on map improvements, and; (F) the status panel. A choropleth map showing the value per capita of freight shipments in the U.S. by state 2002. In the original choropleth map design (B), the data classification accuracy is lower than the average value; the colors between bins are nearly indistinguishable; the map data has not been normalized and the data units are missing. 
After applying the suggested fixes from GeoLinter, the designer produces (C).","keywords":["Data visualization , Image color analysis , Geology , Recommender systems , Guidelines , Bars , Visualization Author Keywords: Automated visualization design , choropleth maps , visualization linting , visualization recommendation"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2310.13707","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/yBF6qqK_ASs&t=0h0m55s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233322372/v-tvcg-20233322372_Preview.mp4?token=-k42yJtftMEU_gUi7HIlQR2mfoAwnh4Gh3wzAVhT740&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233322372/v-tvcg-20233322372_Preview.srt?token=JHK35i5tO62M4yeLJ_2PfyJgF0mJSTX6uxdiwQBk3Iw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full10","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Designing Palettes and Encodings","session_uid":"v-tvcg","session_youtube_ff_id":"p6_Sf3E7KPI","session_youtube_ff_link":"https://youtu.be/p6_Sf3E7KPI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/yBF6qqK_ASs&t=0h0m55s","sessions":["Designing Palettes and Encodings"],"time_stamp":"2024-10-16T17:45:00Z","title":"GeoLinter: A Linting Framework for Choropleth Maps","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1060","abstract":"There is increased interest in understanding the interplay between text and visuals in the field of data visualization. However, this attention has predominantly been on the use of text in standalone visualizations (such as text annotation overlays) or augmenting text stories supported by a series of independent views. In this paper, we shift from the traditional focus on single-chart annotations to characterize the nuanced but crucial communication role of text in the complex environment of interactive dashboards. Through a survey and analysis of 190 dashboards in the wild, plus 13 expert interview sessions with experienced dashboard authors, we highlight the distinctive nature of text as an integral component of the dashboard experience, while delving into the categories, semantic levels, and functional roles of text, and exploring how these text elements are coalesced by dashboard authors to guide and inform dashboard users. Our contributions are threefold. First, we distill qualitative and quantitative findings from our studies to characterize current practices of text use in dashboards, including a categorization of text-based components and design patterns. Second, we leverage current practices and existing literature to propose, discuss, and validate recommended practices for text in dashboards, embodied as a set of 12 heuristics that underscore the semantic and functional role of text in offering navigational cues, contextualizing data insights, supporting reading order, among other concerns. Third, we reflect on our findings to identify gaps and propose opportunities for data visualization researchers to push the boundaries on text usage for dashboards, from authoring support and interactivity to text generation and content personalization. 
Our research underscores the significance of elevating text as a first-class citizen in data visualization, and the need to support the inclusion of textual components and their interactive affordances in dashboard design.","accessible_pdf":true,"authors":[{"affiliations":["Tableau Research, Seattle, United States"],"email":"nicole.sultanum@gmail.com","is_corresponding":true,"name":"Nicole Sultanum"},{"affiliations":["Tableau Research, Palo Alto, United States"],"email":"vsetlur@tableau.com","is_corresponding":false,"name":"Vidya Setlur"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1060","image_caption":"Our work seeks to elevate text as a first-class citizen in dashboards. From a survey and analysis of 190 dashboards and interview feedback from 13 experts, we (a) highlight current dashboard text practices, (b) propose and validate recommended practices as a set of 12 heuristics for dashboard text, and (c) outline opportunities for future research to take dashboard text to the next level.","keywords":["Text, dashboards, semantic levels, metadata, interactivity, instruction, description, takeaways, conversational heuristics"],"open_access_supplemental_link":"https://osf.io/49zp5/?view_only=cafb29af267d4b50a379050695c39712","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.14451","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/GmSnZQ8onkA&t=0h37m51s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1060/v-full-1060_Preview.mp4?token=piuK0GmI2L8BDBH612TjHAs-Q5a4a9J1XrqK1pw0boM&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1060/v-full-1060_Preview.srt?token=pP4Qpqztv-Lw1dRZpbJq792fxgCsh957Fu9fquq0v9A&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full11","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Text, Annotation, and Metaphor","session_uid":"v-full","session_youtube_ff_id":"OZmdwGmz1BI","session_youtube_ff_link":"https://youtu.be/OZmdwGmz1BI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/GmSnZQ8onkA&t=0h37m51s","sessions":["Text, Annotation, and Metaphor"],"time_stamp":"2024-10-16T13:06:00Z","title":"From Instruction to Insight: Exploring the Semantic and Functional Roles of Text in Interactive Dashboards","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1295","abstract":"Annotations play a vital role in highlighting critical aspects of visualizations, aiding in data externalization and exploration, collaborative sensemaking, and visual storytelling. However, despite their widespread use, we identified a lack of a design space for common practices for annotations. In this paper, we evaluated over 1,800 static annotated charts to understand how people annotate visualizations in practice. 
Through qualitative coding of these diverse real-world annotated charts, we explored three primary aspects of annotation usage patterns: analytic purposes for chart annotations (e.g., present, identify, summarize, or compare data features), mechanisms for chart annotations (e.g., types and combinations of annotations used, frequency of different annotation types across chart types, etc.), and the data source used to generate the annotations. We then synthesized our findings into a design space of annotations, highlighting key design choices for chart annotations. We presented three case studies illustrating our design space as a practical framework for chart annotations to enhance the communication of visualization insights. All supplemental materials are available at \\url{https://shorturl.at/bAGM1}.","accessible_pdf":false,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States","University of Utah, Salt Lake City, United States"],"email":"dilshadur@sci.utah.edu","is_corresponding":true,"name":"Md Dilshadur Rahman"},{"affiliations":["University of Oklahoma, Norman, United States","University of Oklahoma, Norman, United States"],"email":"quadri@ou.edu","is_corresponding":false,"name":"Ghulam Jilani Quadri"},{"affiliations":["University of South Florida , Tampa, United States","University of South Florida , Tampa, United States"],"email":"bdoppalapudi@usf.edu","is_corresponding":false,"name":"Bhavana Doppalapudi"},{"affiliations":["University of North Carolina-Chapel Hill, Chapel Hill, United States","University of North Carolina-Chapel Hill, Chapel Hill, United States"],"email":"danielle.szafir@cs.unc.edu","is_corresponding":false,"name":"Danielle Albers Szafir"},{"affiliations":["University of Utah, Salt Lake City, United States","University of Utah, Salt Lake City, United States"],"email":"paul.rosen@utah.edu","is_corresponding":false,"name":"Paul Rosen"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1295","image_caption":"A line chart from The Washington Post illustrates COVID-19 peak comparisons, plotting time on the horizontal axis and percentage growth relative to the January 2021 peak vertically: top-left shows the baseline chart with basic visualization elements (i.e., axes, labels, lines, legends, and gridlines) but with annotations removed; top-right uses color+enclosure+text ensembles of annotations to help identify the peaks of different COVID-19 waves; bottom-left uses text+connector ensembles to present additional context from the associated article; and bottom-right displays the completely annotated chart.","keywords":["Annotations, visualizations, qualitative study, design space, taxonomy"],"open_access_supplemental_link":"https://shorturl.at/bAGM1","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full 
Paper","preprint_link":"https://arxiv.org/abs/2306.06043","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/GmSnZQ8onkA&t=0h12m39s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1295/v-full-1295_Preview.mp4?token=_i10nu4OVfcDWUUNBh5WygxjQbR6zDtTaRFQ2SYbFSA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1295/v-full-1295_Preview.srt?token=vDAxRPdIju68mPl5pEWvJRetZ5qX4nU96y3icPRXq8g&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full11","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Text, Annotation, and Metaphor","session_uid":"v-full","session_youtube_ff_id":"UiheOlbONP0","session_youtube_ff_link":"https://youtu.be/UiheOlbONP0","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/GmSnZQ8onkA&t=0h12m39s","sessions":["Text, Annotation, and Metaphor"],"time_stamp":"2024-10-16T12:42:00Z","title":"A Qualitative Analysis of Common Practices in Annotations: A Taxonomy and Design Space","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1316","abstract":"We apply an approach from cognitive linguistics by mapping Conceptual Metaphor Theory (CMT) to the visualization domain to address patterns of visual conceptual metaphors that are often used in science infographics. Metaphors play an essential part in visual communication and are frequently employed to explain complex concepts. However, their use is often based on intuition, rather than following a formal process. At present, we lack tools and language for understanding and describing metaphor use in visualization to the extent where taxonomy and grammar could guide the creation of visual components, e.g., infographics. Our classification of the visual conceptual mappings within scientific representations is based on the breakdown of visual components in existing scientific infographics. We demonstrate the development of this mapping through a detailed analysis of data collected from four domains (biomedicine, climate, space, and anthropology) that represent a diverse range of visual conceptual metaphors used in the visual communication of science. This work allows us to identify patterns of visual conceptual metaphor use within the domains, resolve ambiguities about why specific conceptual metaphors are used, and develop a better overall understanding of visual metaphor use in scientific infographics. Our analysis shows that ontological and orientational conceptual metaphors are the most widely applied to translate complex scientific concepts. 
To support our findings we developed a visual exploratory tool based on the collected database that places the individual infographics on a spatio-temporal scale and illustrates the breakdown of visual conceptual metaphors.","accessible_pdf":false,"authors":[{"affiliations":["Masaryk University, Brno, Czech Republic"],"email":"hana.pokojna@gmail.com","is_corresponding":true,"name":"Hana Pokojn\u00e1"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"tobias.isenberg@gmail.com","is_corresponding":false,"name":"Tobias Isenberg"},{"affiliations":["University of Rostock, Rostock, Germany"],"email":"stefan.bruckner@gmail.com","is_corresponding":false,"name":"Stefan Bruckner"},{"affiliations":["Masaryk University, Brno, Czech Republic"],"email":"kozlikova@fi.muni.cz","is_corresponding":false,"name":"Barbora Kozlikova"},{"affiliations":["University of Bergen, Bergen, Norway","Haukeland University Hospital, University of Bergen, Bergen, Norway"],"email":"laura.garrison@uib.no","is_corresponding":false,"name":"Laura Garrison"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1316","image_caption":"image_VisualMetaphors This image illustrates our process (from left to right) for identifying and classifying visual conceptual metaphors in scientific infographics: 1) deconstruct a given infographic to its component graphics, 2) identify component graphics as visual conceptual metaphors versus visual abstractions, 3) classify the conceptual metaphor type (structural, ontological, orientational, or imagistic), and 4) provide infographic metadata and classify the spatiotemporal scale of the phenomenon visualized to enable detailed investigation in our Visual Exploratory Tool. ","keywords":["Visualization, visual metaphors, science communication, conceptual metaphors, visual communication"],"open_access_supplemental_link":"https://osf.io/8xrjm/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"http://arxiv.org/abs/2407.13416","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/GmSnZQ8onkA&t=0h25m10s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1316/v-full-1316_Preview.mp4?token=aom1JV67M5JX6eNDgcArCWIG3btpoAxnO5PPvGZrCZI&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full11","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Text, Annotation, and Metaphor","session_uid":"v-full","session_youtube_ff_id":"vydQsSgBECk","session_youtube_ff_link":"https://youtu.be/vydQsSgBECk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/GmSnZQ8onkA&t=0h25m10s","sessions":["Text, Annotation, and Metaphor"],"time_stamp":"2024-10-16T12:54:00Z","title":"The Language of Infographics: Toward Understanding Conceptual Metaphor Use in Scientific Storytelling","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1594","abstract":"The visualization community has a rich history of reflecting upon visualization design flaws. 
Although research in this area has remained lively, we believe it is essential to continuously revisit this classic and critical topic in visualization research by incorporating more empirical evidence from diverse sources, characterizing new design flaws, building more systematic theoretical frameworks, and understanding the underlying reasons for these flaws. To address the above gaps, this work investigated visualization design flaws through the lens of the public, constructed a framework to summarize and categorize the identified flaws, and explored why these flaws occur. Specifically, we analyzed 2227 flawed data visualizations collected from an online gallery and derived a design task-associated taxonomy containing 76 specific design flaws. These flaws were further classified into three high-level categories (i.e., misinformation, uninformativeness, unsociability) and ten subcategories (e.g., inaccuracy, unfairness, ambiguity). Next, we organized five focus groups to explore why these design flaws occur and identified seven causes of the flaws. Finally, we proposed a research agenda for combating visualization design flaws and summarize nine research opportunities.","accessible_pdf":false,"authors":[{"affiliations":["Fudan University, Shanghai, China","Fudan University, Shanghai, China"],"email":"xingyulan96@gmail.com","is_corresponding":true,"name":"Xingyu Lan"},{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom","University of Edinburgh, Edinburgh, United Kingdom"],"email":"coraline.liu.dataviz@gmail.com","is_corresponding":false,"name":"Yu Liu"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1594","image_caption":"The image consists of three panels: (i) a taxonomy of 76 design flaws, categorized into 3 high-level categories and 10 subcategories; (ii) an example of our website displaying detailed information on design flaws and the corpus; and (iii) an agenda on HOW to combat visualization design flaws.","keywords":["Visualization Design, General Public, Chart Junk, Deceptive Visualization, Misinformation, User Experience"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/GmSnZQ8onkA&t=0h51m10s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1594/v-full-1594_Preview.mp4?token=B9Jdz8SGk1SHbPzpmdPFshIFsnyhUeDMu4Jk_yQSLvQ&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1594/v-full-1594_Preview.srt?token=LnGDD4FF6qg_f5naq4IZIiYCBHpBuyjrXVzyyUDpyXA&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full11","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Text, Annotation, and Metaphor","session_uid":"v-full","session_youtube_ff_id":"4OD9F2xvtPk","session_youtube_ff_link":"https://youtu.be/4OD9F2xvtPk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/GmSnZQ8onkA&t=0h51m10s","sessions":["Text, Annotation, and Metaphor"],"time_stamp":"2024-10-16T13:18:00Z","title":"\"I Came Across a Junk\": Understanding Design Flaws of Data Visualization from the Public's 
Perspective","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1810","abstract":"Classical bibliography, by researching preserved catalogs from both official archives and personal collections of accumulated books, examines the books throughout history, thereby revealing cultural development across historical periods. In this work, we collaborate with domain experts to accomplish the task of data annotation concerning Chinese ancient catalogs. We introduce the CataAnno system that facilitates users in completing annotations more efficiently through cross-linked views, recommendation methods and convenient annotation interactions. The recommendation method can learn the background knowledge and annotation patterns that experts subconsciously integrate into the data during prior annotation processes. CataAnno searches for the most relevant examples previously annotated and recommends to the user. Meanwhile, the cross-linked views assist users in comprehending the correlations between entries and offer explanations for these recommendations. Evaluation and expert feedback confirm that the CataAnno system, by offering high-quality recommendations and visualizing the relationships between entries, can mitigate the necessity for specialized knowledge during the annotation process. This results in enhanced accuracy and consistency in annotations, thereby enhancing the overall efficiency.","accessible_pdf":false,"authors":[{"affiliations":["Peking University, Beijing, China"],"email":"hanning.shao@pku.edu.cn","is_corresponding":true,"name":"Hanning Shao"},{"affiliations":["Peking University, Beijing, China"],"email":"xiaoru.yuan@pku.edu.cn","is_corresponding":false,"name":"Xiaoru Yuan"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1810","image_caption":"Classical bibliography examines the books throughout history and reveal cultural development by researching preserved catalogs. Through interdisciplinary collaboration, we propose CataAnno, an intelligent annotation system that helps with annotation cleaning of these ancient catalogs. 
Learning-based recommendations and convenient interactions supported by CataAnno enhance the consistency and efficiency of the annotation process.","keywords":["Digital humanities, text annotation tool, text visualization, machine learning, catalog"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/GmSnZQ8onkA&t=1h4m54s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1810/v-full-1810_Preview.mp4?token=svlpWrXTXba_ADnrB67YdHrra2wvXxdOQlnNYeB_NlU&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1810/v-full-1810_Preview.srt?token=-cCkRQ40jUMa22Rdt6KH-foUcUxQp4MuQSNoO1WfsZs&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full11","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Text, Annotation, and Metaphor","session_uid":"v-full","session_youtube_ff_id":"JP2jrdeR04g","session_youtube_ff_link":"https://youtu.be/JP2jrdeR04g","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/GmSnZQ8onkA&t=1h4m54s","sessions":["Text, Annotation, and Metaphor"],"time_stamp":"2024-10-16T13:30:00Z","title":"CataAnno: An Ancient Catalog Annotator for Annotation Cleaning by Recommendation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233338451","abstract":"This paper investigates the role of text in visualizations, specifically the impact of text position, semantic content, and biased wording. Two empirical studies were conducted based on two tasks (predicting data trends and appraising bias) using two visualization types (bar and line charts). While the addition of text had a minimal effect on how people perceive data trends, there was a significant impact on how biased they perceive the authors to be. This finding revealed a relationship between the degree of bias in textual information and the perception of the authors' bias. Exploratory analyses support an interaction between a person's prediction and the degree of bias they perceived. This paper also develops a crowdsourced method for creating chart annotations that range from neutral to highly biased. This research highlights the need for designers to mitigate potential polarization of readers' opinions based on how authors' ideas are expressed.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Chase Stokes"},{"affiliations":"","email":"","is_corresponding":false,"name":"Cindy Xiong Bearfield"},{"affiliations":"","email":"","is_corresponding":false,"name":"Marti Hearst"}],"award":"","doi":"10.1109/TVCG.2023.3338451","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233338451","image_caption":"Left: Study stimuli consisted of line and bar charts that were derived from prior work and designed to have ambiguous prediction outcomes. The experiments varied the text position and text content for these charts; examples of these stimuli from both studies are shown behind the baseline charts. 
Right: Two tasks were studied with crowdsourced participants: prediction of the outcome of the trend, and assessment of the bias of the visualization author using the assessment questions shown.","keywords":["Visualization, text, annotation, perceived bias, judgment, prediction"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/GmSnZQ8onkA&t=0h0m36s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233338451/v-tvcg-20233338451_Preview.mp4?token=GDOcprxTiC1GFyxj-7-5YEpQVECOUAMrHfVYbVVwsQQ&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233338451/v-tvcg-20233338451_Preview.srt?token=uD8QUWlQuAWjv93HYRjzVjp9MRqXSA0I_2IHYxp7x_o&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full11","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Text, Annotation, and Metaphor","session_uid":"v-tvcg","session_youtube_ff_id":"zVf1a096Lj8","session_youtube_ff_link":"https://youtu.be/zVf1a096Lj8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/GmSnZQ8onkA&t=0h0m36s","sessions":["Text, Annotation, and Metaphor"],"time_stamp":"2024-10-16T12:30:00Z","title":"The Role of Text in Visualizations: How Annotations Shape Perceptions of Bias and Influence Predictions","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1281","abstract":"Participatory budgeting (PB) is a democratic approach to allocating municipal spending that has been adopted in many places in recent years, including in Chicago. Current PB voting resembles a ballot where residents are asked which municipal projects, such as school improvements and road repairs, to fund with a limited budget. In this work, we ask how interactive visualization can benefit PB by conducting a design probe-based interview study (N=13) with policy workers and academics with expertise in PB, urban planning, and civic HCI. Our probe explores how graphical elicitation of voter preferences and a dashboard of voting statistics can be incorporated into a realistic PB tool. Through qualitative analysis, we find that visualization creates opportunities for city government to set expectations about budget constraints while also granting their constituents greater freedom to articulate a wider range of preferences. However, using visualization to provide transparency about PB requires efforts to mitigate potential access barriers and mistrust. 
We call for more visualization professionals to help build civic capacity by working in and studying political systems.","accessible_pdf":true,"authors":[{"affiliations":["University of Chicago, Chicago, United States"],"email":"kalea@uchicago.edu","is_corresponding":true,"name":"Alex Kale"},{"affiliations":["University of Chicago, Chicago, United States"],"email":"danni6@uchicago.edu","is_corresponding":false,"name":"Danni Liu"},{"affiliations":["University of Chicago, Chicago, United States"],"email":"mariagabrielaa@uchicago.edu","is_corresponding":false,"name":"Maria Gabriela Ayala"},{"affiliations":["University of Chicago, Chicago, United States"],"email":"hwschwab@uchicago.edu","is_corresponding":false,"name":"Harper Schwab"},{"affiliations":["University of Washington, Seattle, United States","University of Utah, Salt Lake City, United States"],"email":"mcnutt.andrew@gmail.com","is_corresponding":false,"name":"Andrew M McNutt"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1281","image_caption":"An illustration of the application scenario for this work, participatory budgeting in Chicago. We investigate the roles that visualization can play in voting on how municipal funding should be spent on neighborhood projects and reporting results of the participatory budgeting vote to stakeholders.","keywords":["Visualization, Preference elicitation, Digital democracy"],"open_access_supplemental_link":"https://osf.io/tn6m2/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"http://arxiv.org/abs/2407.20103","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/3FAi9iPZPRA&t=0h27m55s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1281/v-full-1281_Preview.mp4?token=y9YINAlWfIcoFGqsDl8_Cv6p7SfGysnOf5tLybgHOTU&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1281/v-full-1281_Preview.srt?token=dmpHsfpn5XSrgmXauP9Z0qWOGW45CNrthyZEpM3D2o0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full12","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Journalism and Public Policy","session_uid":"v-full","session_youtube_ff_id":"Uwwba1Z9EbE","session_youtube_ff_link":"https://youtu.be/Uwwba1Z9EbE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/3FAi9iPZPRA&t=0h27m55s","sessions":["Journalism and Public Policy"],"time_stamp":"2024-10-17T18:09:00Z","title":"What Can Interactive Visualization do for Participatory Budgeting in Chicago?","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1438","abstract":"Differential privacy ensures the security of individual privacy but poses challenges to data exploration processes because the limited privacy budget incapacitates the flexibility of exploration and the noisy feedback of data requests leads to confusing uncertainty. In this study, we take the lead in describing corresponding exploration scenarios, including underlying requirements and available exploration strategies. To facilitate practical applications, we propose a visual analysis approach to the formulation of exploration strategies. 
Our approach applies a reinforcement learning model to provide diverse suggestions for exploration strategies according to the exploration intent of users. A novel visual design for representing uncertainty in correlation patterns is integrated into our prototype system to support the proposed approach. Finally, we implemented a user study and two case studies. The results of these studies verified that our approach can help develop strategies that satisfy the exploration intent of users.","accessible_pdf":false,"authors":[{"affiliations":["Nankai University, Tianjin, China"],"email":"wangxumeng@nankai.edu.cn","is_corresponding":true,"name":"Xumeng Wang"},{"affiliations":["Nankai University, Tianjin, China"],"email":"jiaoshuangcheng@mail.nankai.edu.cn","is_corresponding":false,"name":"Shuangcheng Jiao"},{"affiliations":["Arizona State University, Tempe, United States"],"email":"cbryan16@asu.edu","is_corresponding":false,"name":"Chris Bryan"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1438","image_caption":"Defogger augments the ability of humans to explore and gain increased value from data while adhering to constraints of Differential privacy.","keywords":["Differential privacy, Visual data analysis, Data exploration, Visualization for uncertainty illustration"],"open_access_supplemental_link":"https://github.com/Vanellope7/Defogger","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"http://arxiv.org/abs/2407.19364","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/3FAi9iPZPRA&t=1h0m52s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1438/v-full-1438_Preview.mp4?token=uETUHjyLsHeNXUZg0czlJ92MqaE_uZaICE3o3vENo_A&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1438/v-full-1438_Preview.srt?token=YKeChoR-RmVNNjhdT2CpXwuYLiFnaKF2TXQe5ZTWlYE&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full12","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Journalism and Public Policy","session_uid":"v-full","session_youtube_ff_id":"BDNvBU24Hls","session_youtube_ff_link":"https://youtu.be/BDNvBU24Hls","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/3FAi9iPZPRA&t=1h0m52s","sessions":["Journalism and Public Policy"],"time_stamp":"2024-10-17T18:45:00Z","title":"Defogger: A Visual Analysis Approach for Data Exploration of Sensitive Data Protected by Differential Privacy","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1446","abstract":"This paper defines, analyzes, and discusses the emerging genre of visualization atlases. We currently witness an increase in web-based, data-driven initiatives that call themselves \u201catlases\u201d while explaining complex, contemporary issues through data and visualizations: climate change, sustainability, AI, or cultural discoveries. To understand this emerging genre and inform their design, study, and authoring support, we conducted a systematic analysis of 33 visualization atlases and semi-structured interviews with eight visualization atlas creators. 
Based on our results, we contribute (1) a definition of a visualization atlas as a compendium of (web) pages aimed at explaining and supporting exploration of data about a dedicated topic through data, visualizations and narration. (2) a set of design patterns of 8 design dimensions, (3) insights into the atlas creation from interviews and (4) the definition of 5 visualization atlas genres. We found that visualization atlases are unique in the way they combine i) exploratory visualization, ii) narrative elements from data-driven storytelling and iii) structured navigation mechanisms. They target a wide range of audiences with different levels of domain knowledge, acting as tools for study, communication, and discovery. We conclude with a discussion of current design practices and emerging questions around the ethics and potential real-world impact of visualization atlases, aimed to inform the design and study of visualization atlases.","accessible_pdf":false,"authors":[{"affiliations":["The University of Edinburgh, Edinburgh, United Kingdom"],"email":"jinrui.w@outlook.com","is_corresponding":true,"name":"Jinrui Wang"},{"affiliations":["Newcastle University, Newcastle Upon Tyne, United Kingdom"],"email":"xinhuan.shu@gmail.com","is_corresponding":false,"name":"Xinhuan Shu"},{"affiliations":["Inria, Bordeaux, France","University of Edinburgh, Edinburgh, United Kingdom"],"email":"bbach@inf.ed.ac.uk","is_corresponding":false,"name":"Benjamin Bach"},{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"uhinrich@ed.ac.uk","is_corresponding":false,"name":"Uta Hinrichs"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1446","image_caption":"An overview of the paper 'Visualization Atlases: Explaining and Exploring Complex Topics through Data, Visualization, and Narration' by Jinrui Wang, Xinhuan Shu, Benjamin Bach, and Ute Hinrichs, featuring a backdrop of selected covers from the visualization atlas cases analyzed in the survey.","keywords":["Visualization Atlases, Information Visualization, Data-driven Storytelling"],"open_access_supplemental_link":"https://vis-atlas.github.io","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/3FAi9iPZPRA&t=0h50m56s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1446/v-full-1446_Preview.mp4?token=gK7iNaMwM5fAFvMQve39FBaxshqihdeh3jM1dP0VQ34&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1446/v-full-1446_Preview.srt?token=amVtBvItJPrjZfE4PpsooM1gYrfxFwUYBhJAnKT5w2c&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full12","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Journalism and Public Policy","session_uid":"v-full","session_youtube_ff_id":"S5Pi7FB5Eek","session_youtube_ff_link":"https://youtu.be/S5Pi7FB5Eek","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/3FAi9iPZPRA&t=0h50m56s","sessions":["Journalism and Public Policy"],"time_stamp":"2024-10-17T18:33:00Z","title":"Visualization Atlases: Explaining and Exploring Complex Topics through Data, Visualization, and 
Narration","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1488","abstract":"A year ago, we submitted an IEEE VIS paper entitled \u201cSwaying the Public? Impacts of Election Forecast Visualizations on Emotion, Trust, and Intention in the 2022 U.S. Midterms\u201d [50], which was later bestowed with the honor of a best paper award. Yet, studying such a complex phenomenon required us to explore many more design paths than we could count, and certainly more than we could document in a single paper. This paper, then, is the unwritten prequel\u2014the backstory. It chronicles our journey from a simple idea\u2014to study visualizations for election forecasts\u2014through obstacles such as developing meaningfully different, easy-to-understand forecast visualizations, crafting professional-looking forecasts, and grappling with how to study perceptions of the forecasts before, during, and after the 2022 U.S. midterm elections. This journey yielded a rich set of original knowledge. We formalized a design space for two-party election forecasts, navigating through dimensions like data transformations, visual channels, and types of animated narratives. Through qualitative evaluation of ten representative prototypes with 13 participants, we then identi\ufb01ed six core insights into the interpretation of uncertainty visualizations in a U.S. election context. These insights informed our revisions to remove ambiguity in our visual encodings and to prepare a professional-looking forecasting website. As part of this story, we also distilled challenges faced and design lessons learned to inform both designers and practitioners. Ultimately, we hope our methodical approach could inspire others in the community to tackle the hard problems inherent to designing and evaluating visualizations for the general public.","accessible_pdf":true,"authors":[{"affiliations":["Northwestern University, Evanston, United States"],"email":"fumeng.p.yang@gmail.com","is_corresponding":true,"name":"Fumeng Yang"},{"affiliations":["Northwestern University, Evanston, United States","Northwestern University, Evanston, United States"],"email":"mandicai2028@u.northwestern.edu","is_corresponding":false,"name":"Mandi Cai"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"chloemortenson2026@u.northwestern.edu","is_corresponding":false,"name":"Chloe Rose Mortenson"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"hoda@u.northwestern.edu","is_corresponding":false,"name":"Hoda Fakhari"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"aysedlokmanoglu@gmail.com","is_corresponding":false,"name":"Ayse Deniz Lokmanoglu"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"nicholas.diakopoulos@gmail.com","is_corresponding":false,"name":"Nicholas Diakopoulos"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"erik.nisbet@northwestern.edu","is_corresponding":false,"name":"Erik Nisbet"},{"affiliations":["Northwestern University, Chicago, United States"],"email":"matthew.kay@gmail.com","is_corresponding":false,"name":"Matthew Kay"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1488","image_caption":"We iterated over numerous designs for the election forecast visualizations for the 2022 governor elections. 
This paper documents our journey, experiences, and lessons learned.","keywords":["Uncertainty visualization, probabilistic forecasts, design space, animation"],"open_access_supplemental_link":"https://www.doi.org/10.17605/osf.io/ygq2v","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://doi.org/10.31219/osf.io/927vy","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/3FAi9iPZPRA&t=0h39m1s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1488/v-full-1488_Preview.mp4?token=YEVn09ch56y13mFON0apbM7OHxXyXR0MIoh07wLauQs&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1488/v-full-1488_Preview.srt?token=EIKBFxA2WYMJZ5-WNVd7RX0F558nVKQtCGmnsQFM1Ic&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full12","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Journalism and Public Policy","session_uid":"v-full","session_youtube_ff_id":"haLpw_OzpFw","session_youtube_ff_link":"https://youtu.be/haLpw_OzpFw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/3FAi9iPZPRA&t=0h39m1s","sessions":["Journalism and Public Policy"],"time_stamp":"2024-10-17T18:21:00Z","title":"The Backstory to \u201cSwaying the Public\u201d: A Design Chronicle of Election Forecast Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233287585","abstract":"Data visualization and journalism are deeply connected. From early infographics to recent data-driven storytelling, visualization has become an integrated part of contemporary journalism, primarily as a communication artifact to inform the general public. Data journalism, harnessing the power of data visualization, has emerged as a bridge between the growing volume of data and our society. Visualization research that centers around data storytelling has sought to understand and facilitate such journalistic endeavors. However, a recent metamorphosis in journalism has brought broader challenges and opportunities that extend beyond mere communication of data. We present this article to enhance our understanding of such transformations and thus broaden visualization research's scope and practical contribution to this evolving field. We first survey recent significant shifts, emerging challenges, and computational practices in journalism. We then summarize six roles of computing in journalism and their implications. Based on these implications, we provide propositions for visualization research concerning each role. 
Ultimately, by mapping the roles and propositions onto a proposed ecological model and contextualizing existing visualization research, we surface seven general topics and a series of research agendas that can guide future visualization research at this intersection.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Yu Fu"},{"affiliations":"","email":"","is_corresponding":false,"name":"John Stasko"}],"award":"","doi":"10.1109/TVCG.2023.3287585","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233287585","image_caption":"This diagram highlights the intersection of journalism and visualization, focusing on Six Roles of Computing in Journalism: Facilitator, Analyzer, Communicator, Public Forum, Automator, and Auditor. It outlines key transformations in journalism, like interactive and personalized news, and explores computational practices such as data journalism and computer-assisted reporting. The diagram also proposes seven research topics to advance visualization's role in journalism, including combating misinformation and supporting analytical tasks. The aim is to contextualize visualization's value in addressing emerging challenges and enhancing journalistic practices. ","keywords":["Computational journalism,data visualization,data-driven storytelling, journalism"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/3FAi9iPZPRA&t=0h0m5s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233287585/v-tvcg-20233287585_Preview.mp4?token=_kT4xh5FeUt7w5Nhf5xrZIZbU3GAPDjkCZH90GmLwcY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233287585/v-tvcg-20233287585_Preview.srt?token=L-kBpKwLlfShttlv4uMAZZRYshD-haWVX6NJnJKx1a8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full12","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Journalism and Public Policy","session_uid":"v-tvcg","session_youtube_ff_id":"vhoQGLEX1W8","session_youtube_ff_link":"https://youtu.be/vhoQGLEX1W8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/3FAi9iPZPRA&t=0h0m5s","sessions":["Journalism and Public Policy"],"time_stamp":"2024-10-17T17:45:00Z","title":"More Than Data Stories: Broadening the Role of Visualization in Contemporary Journalism","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243355884","abstract":"News articles containing data visualizations play an important role in informing the public on issues ranging from public health to politics. Recent research on the persuasive appeal of data visualizations suggests that prior attitudes can be notoriously difficult to change. Inspired by an NYT article, we designed two experiments to evaluate the impact of elicitation and contrasting narratives on attitude change, recall, and engagement. We hypothesized that eliciting prior beliefs leads to more elaborative thinking that ultimately results in higher attitude change, better recall, and engagement. Our findings revealed that visual elicitation leads to higher engagement in terms of feelings of surprise. 
While there is an overall attitude change across all experiment conditions, we did not observe a significant effect of belief elicitation on attitude change. With regard to recall error, while participants in the draw trend elicitation exhibited significantly lower recall error than participants in the categorize trend condition, we found no significant difference in recall error when comparing elicitation conditions to no elicitation. In a follow-up study, we added contrasting narratives with the purpose of making the main visualization (communicating data on the focal issue) appear strikingly different. Compared to the results of Study 1, we found that contrasting narratives improved engagement in terms of surprise and interest but interestingly resulted in higher recall error and no significant change in attitude. We discuss the effects of elicitation and contrasting narratives in the context of topic involvement and the strengths of temporal trends encoded in the data visualization.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Milad Rogha"},{"affiliations":"","email":"","is_corresponding":false,"name":"Subham Sah"},{"affiliations":"","email":"","is_corresponding":false,"name":"Alireza Karduni"},{"affiliations":"","email":"","is_corresponding":false,"name":"Douglas Markant"},{"affiliations":"","email":"","is_corresponding":false,"name":"Wenwen Dou"}],"award":"","doi":"10.1109/TVCG.2024.3355884","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243355884","image_caption":"Data visualizations in news articles not only inform but also play a crucial role in shaping public opinion on important issues. Can data visualization researchers and designers \u2018nudge\u2019 people toward more elaborative thinking? 
Inspired by a New York Times article, we conducted two experiments to explore how eliciting prior beliefs and contrasting narratives influence engagement, attitude change, and recall.","keywords":["Data Visualization, Market Research, Visualization, Uncertainty, Data Models, Correlation, Attitude Control, Belief Elicitation, Visual Elicitation, Data Visualization, Contrasting Narratives"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2401.05511","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/3FAi9iPZPRA&t=0h14m48s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243355884/v-tvcg-20243355884_Preview.mp4?token=nw4kl_fRHfxOfS0H7UuBVjdlxGqlCmM-pK__biskZ4I&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243355884/v-tvcg-20243355884_Preview.srt?token=JEfeNpbl8wrsDFdI5umzIeceGgHcse6UJpJmkBCfq0A&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full12","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Journalism and Public Policy","session_uid":"v-tvcg","session_youtube_ff_id":"iryPS3aExhY","session_youtube_ff_link":"https://youtu.be/iryPS3aExhY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/3FAi9iPZPRA&t=0h14m48s","sessions":["Journalism and Public Policy"],"time_stamp":"2024-10-17T17:57:00Z","title":"The Impact of Elicitation and Contrasting Narratives on Engagement, Recall and Attitude Change with News Articles Containing Data Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1121","abstract":"Acute stroke demands prompt diagnosis and treatment to achieve optimal patient outcomes. However, the intricate and irregular nature of clinical data associated with acute stroke, particularly blood pressure (BP) measurements, presents substantial obstacles to effective visual analytics and decision-making. Through a year-long collaboration with experienced neurologists, we developed PhenoFlow, a visual analytics system that leverages the collaboration between human and Large Language Models (LLMs) to analyze the extensive and complex data of acute ischemic stroke patients. PhenoFlow pioneers an innovative workflow, where the LLM serves as a data wrangler while neurologists explore and supervise the output using visualizations and natural language interactions. This approach enables neurologists to focus more on decision-making with reduced cognitive load. To protect sensitive patient information, PhenoFlow only utilizes metadata to make inferences and synthesize executable codes, without accessing raw patient data. This ensures that the results are both reproducible and interpretable while maintaining patient privacy. The system incorporates a slice-and-wrap design that employs temporal folding to create an overlaid circular visualization. Combined with a linear bar graph, this design aids in exploring meaningful patterns within irregularly measured BP data. Through case studies, PhenoFlow has demonstrated its capability to support iterative analysis of extensive clinical datasets, reducing cognitive load and enabling neurologists to make well-informed decisions. 
Grounded in long-term collaboration with domain experts, our research demonstrates the potential of utilizing LLMs to tackle current challenges in data-driven clinical decision-making for acute ischemic stroke patients.","accessible_pdf":false,"authors":[{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"jykim@hcil.snu.ac.kr","is_corresponding":true,"name":"Jaeyoung Kim"},{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"sihyeon@hcil.snu.ac.kr","is_corresponding":false,"name":"Sihyeon Lee"},{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"hj@hcil.snu.ac.kr","is_corresponding":false,"name":"Hyeon Jeon"},{"affiliations":["Korea University Guro Hospital, Seoul, Korea, Republic of"],"email":"gooday19@gmail.com","is_corresponding":false,"name":"Keon-Joo Lee"},{"affiliations":["Hankuk University of Foreign Studies, Yongin-si, Korea, Republic of"],"email":"bkim@hufs.ac.kr","is_corresponding":false,"name":"Bohyoung Kim"},{"affiliations":["Seoul National University Bundang Hospital, Seongnam, Korea, Republic of"],"email":"braindoc@snu.ac.kr","is_corresponding":false,"name":"HEE JOON"},{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"jseo@snu.ac.kr","is_corresponding":false,"name":"Jinwook Seo"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1121","image_caption":"PhenoFlow empowers neurologists to explore large and complex stroke datasets with reduced cognitive load. (A) The cohort construction component allows neurologists to define target cohorts using natural language. (B) The Visual Inspection View provides plain-language explanations and small multiples of relevant fields to debug LLM data wrangler behavior. (C) The Cohort View summarizes (C1) cohort relationships in a node-link diagram and (C2) each patient's blood pressure (BP) trajectories as matrix visualization. (C3) Natural language filtering supports iterative cohort exploration. 
(D1) Linear bar charts and (D2) slice-and-wrap visualization present BP trajectories as time-series, revealing triangular patterns in irregularly measured BP data.","keywords":["Stroke, Irregularly spaced time-series data, Multi-dimensional data, Cohort analysis, Large language models"],"open_access_supplemental_link":"https://osf.io/q6yc4/","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.16329","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/TkBPnodArzQ&t=0h13m16s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1121/v-full-1121_Preview.mp4?token=lSkRirWnWHhBzgGO2eXgj7s4VeGJqJXdwF--jEY_GC8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1121/v-full-1121_Preview.srt?token=hwvYBVFbHn0wLpZURC3KYGxqDt__oD8g1j19mCZQKZk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full13","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Natural Language and Multimodal Interaction","session_uid":"v-full","session_youtube_ff_id":"K9vSYLsemPM","session_youtube_ff_link":"https://youtu.be/K9vSYLsemPM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/TkBPnodArzQ&t=0h13m16s","sessions":["Natural Language and Multimodal Interaction"],"time_stamp":"2024-10-16T16:12:00Z","title":"PhenoFlow: A Human-LLM Driven Visual Analytics System for Exploring Large and Complex Stroke Datasets","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1474","abstract":"Recent advancements in Large Language Models (LLMs) and Prompt Engineering have made chatbot customization more accessible, significantly reducing barriers to tasks that previously required programming skills. However, prompt evaluation, especially at the dataset scale, remains complex due to the need to assess prompts across thousands of test instances within a dataset. Our study, based on a comprehensive literature review and pilot study, summarized five critical challenges in prompt evaluation. In response, we introduce a feature-oriented workflow for systematic prompt evaluation. In the context of text summarization, our workflow advocates evaluation with summary characteristics (feature metrics) such as complexity, formality, or naturalness, instead of using traditional quality metrics like ROUGE. This design choice enables a more user-friendly evaluation of prompts, as it guides users in sorting through the ambiguity inherent in natural language. To support this workflow, we introduce Awesum, a visual analytics system that facilitates identifying optimal prompt refinements for text summarization through interactive visualizations, featuring a novel Prompt Comparator design that employs a BubbleSet-inspired design enhanced by dimensionality reduction techniques. We evaluate the xeffectiveness and general applicability of the system with practitioners from various domains and found that (1) our design helps overcome the learning curve for non-technical people to conduct a systematic evaluation of summarization prompts, and (2) our feature-oriented workflow has the potential to generalize to other NLG and image-generation tasks. 
For future works, we advocate moving towards feature-oriented evaluation of LLM prompts and discuss unsolved challenges in terms of human-agent interaction.","accessible_pdf":false,"authors":[{"affiliations":["University of California Davis, Davis, United States"],"email":"ytlee@ucdavis.edu","is_corresponding":true,"name":"Sam Yu-Te Lee"},{"affiliations":["University of California, Davis, Davis, United States"],"email":"abahukhandi@ucdavis.edu","is_corresponding":false,"name":"Aryaman Bahukhandi"},{"affiliations":["University of California at Davis, Davis, United States"],"email":"dyuliu@ucdavis.edu","is_corresponding":false,"name":"Dongyu Liu"},{"affiliations":["University of California at Davis, Davis, United States"],"email":"ma@cs.ucdavis.edu","is_corresponding":false,"name":"Kwan-Liu Ma"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1474","image_caption":"Bubble Plot, the key visualization in Awesum, designed to show prompt performance. Yellow curves suggest improvements, and purple curves suggest deterioration. The image suggests a mixed performance. ","keywords":["Visual analytics, prompt engineering, text summarization, human-computer interaction, dimensionality reduction"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.12192","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/TkBPnodArzQ&t=0h43m52s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1474/v-full-1474_Preview.mp4?token=8Hfco-UGUujuyZG-6mVef9LTBGpU15pw5Etlf-Jd148&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1474/v-full-1474_Preview.srt?token=BJaRDhTR-831s4wGTxWJtiiJw0QfZMYQkc2ymh0z7nY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full13","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Natural Language and Multimodal Interaction","session_uid":"v-full","session_youtube_ff_id":"H4QzA6XFPFs","session_youtube_ff_link":"https://youtu.be/H4QzA6XFPFs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/TkBPnodArzQ&t=0h43m52s","sessions":["Natural Language and Multimodal Interaction"],"time_stamp":"2024-10-16T16:48:00Z","title":"Towards Dataset-scale and Feature-oriented Evaluation of Text Summarization in Large Language Model Prompts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1504","abstract":"A wide range of visualization authoring interfaces enable the creation of highly customized visualizations. However, prioritizing expressiveness often impedes the learnability of the authoring interface. The diversity of users, such as varying computational skills and prior experiences in user interfaces, makes it even more challenging for a single authoring interface to satisfy the needs of a broad audience. In this paper, we introduce a framework to balance learnability and expressivity in a visualization authoring system. Adopting insights from learnability studies, such as multimodal interaction and visualization literacy, we explore the design space of blending multiple visualization authoring interfaces for supporting authoring tasks in a complementary and flexible manner. 
To evaluate the effectiveness of blending interfaces, we implemented a proof-of-concept system, Blace, that combines four common visualization authoring interfaces\u2014template-based, shelf configuration, natural language, and code editor\u2014that are tightly linked to one another to help users easily relate unfamiliar interfaces to more familiar ones. Using the system, we conducted a user study with 12 domain experts who regularly visualize genomics data as part of their analysis workflow. Participants with varied visualization and programming backgrounds were able to successfully reproduce unfamiliar visualization examples without a guided tutorial in the study. Feedback from a post-study qualitative questionnaire further suggests that blending interfaces enabled participants to learn the system easily and assisted them in confidently editing unfamiliar visualization grammar in the code editor, enabling expressive customization. Reflecting on our study results and the design of our system, we discuss the different interaction patterns that we identified and design implications for blending visualization authoring interfaces.","accessible_pdf":true,"authors":[{"affiliations":["Harvard Medical School, Boston, United States"],"email":"sehi_lyi@hms.harvard.edu","is_corresponding":true,"name":"Sehi L'Yi"},{"affiliations":["Eindhoven University of Technology, Eindhoven, Netherlands"],"email":"a.v.d.brandt@tue.nl","is_corresponding":false,"name":"Astrid van den Brandt"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"etowah_adams@hms.harvard.edu","is_corresponding":false,"name":"Etowah Adams"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"huyen_nguyen@hms.harvard.edu","is_corresponding":false,"name":"Huyen N. Nguyen"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"nils@hms.harvard.edu","is_corresponding":false,"name":"Nils Gehlenborg"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1504","image_caption":"The trade-off between learnability and expressivity has been discussed as an important design consideration for visualization authoring systems. 
We present Blended Interfaces, a framework for combining multiple authoring interfaces in a complementary way to balance learnability and expressivity.","keywords":["Visualization authoring, blended interfaces, genomics data visualization"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/pjcn4","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/TkBPnodArzQ&t=0h0m57s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1504/v-full-1504_Preview.mp4?token=jFGXnF-YKBBGg2rqeIRI8fjQS13h17VKJfyxu12UsP8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1504/v-full-1504_Preview.srt?token=Rt-H1phZ7gxeNNZf9tIRQ-LGT0o9WudtW8kWLqMlzpU&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full13","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Natural Language and Multimodal Interaction","session_uid":"v-full","session_youtube_ff_id":"IL0N2WMISlg","session_youtube_ff_link":"https://youtu.be/IL0N2WMISlg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/TkBPnodArzQ&t=0h0m57s","sessions":["Natural Language and Multimodal Interaction"],"time_stamp":"2024-10-16T16:00:00Z","title":"Learnable and Expressive Visualization Authoring Through Blended Interfaces","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243368060","abstract":"Visual analytics supports data analysis tasks within complex domain problems. However, due to the richness of data types, visual designs, and interaction designs, users need to recall and process a significant amount of information when they visually analyze data. These challenges emphasize the need for more intelligent visual analytics methods. Large language models have demonstrated the ability to interpret various forms of textual data, offering the potential to facilitate intelligent support for visual analytics. We propose LEVA, a framework that uses large language models to enhance users' VA workflows at multiple stages: onboarding, exploration, and summarization. To support onboarding, we use large language models to interpret visualization designs and view relationships based on system specifications. For exploration, we use large language models to recommend insights based on the analysis of system status and data to facilitate mixed-initiative exploration. For summarization, we present a selective reporting strategy to retrace analysis history through a stream visualization and generate insight reports with the help of large language models. We demonstrate how LEVA can be integrated into existing visual analytics systems. 
Two usage scenarios and a user study suggest that LEVA effectively aids users in conducting visual analytics.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Yuheng Zhao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yixing Zhang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yu Zhang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Xinyi Zhao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Junjie Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Zekai Shao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Cagatay Turkay"},{"affiliations":"","email":"","is_corresponding":false,"name":"Siming Chen"}],"award":"","doi":"10.1109/TVCG.2024.3368060","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243368060","image_caption":"LEVA is a framework that uses large language models to enhance users' VA workflows at multiple stages: onboarding, exploration, and summarization. An implementation of LEVA comprises four components: (A) Users can communicate with LLMs and control the insight annotations in the Chat view; (B) The recommended insights for the next step of analysis from LLMs are updated in the Original system view; (C) Users can retrace the interaction history in the Interaction stream view; (D) Once a historical analysis path is selected, the generated insight report will display in the Report view.","keywords":["Insight recommendation, mixed-initiative, interface agent, large language models, visual analytics"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2403.05816","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/TkBPnodArzQ&t=0h24m57s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243368060/v-tvcg-20243368060_Preview.mp4?token=5FAfObJ8DIokplHb5XdWL3faFT4HuWTWbZ2VOvHgU4Q&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full13","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Natural Language and Multimodal Interaction","session_uid":"v-tvcg","session_youtube_ff_id":"Dy0M7rbwbwo","session_youtube_ff_link":"https://youtu.be/Dy0M7rbwbwo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/TkBPnodArzQ&t=0h24m57s","sessions":["Natural Language and Multimodal Interaction"],"time_stamp":"2024-10-16T16:24:00Z","title":"LEVA: Using Large Language Models to Enhance Visual Analytics","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243368621","abstract":"The use of natural language interfaces (NLIs) to create charts is becoming increasingly popular due to the intuitiveness of natural language interactions. One key challenge in this approach is to accurately capture user intents and transform them to proper chart specifications. This obstructs the wide use of NLI in chart generation, as users' natural language inputs are generally abstract (i.e., ambiguous or under-specified), without a clear specification of visual encodings. 
Recently, pre-trained large language models (LLMs) have exhibited superior performance in understanding and generating natural language, demonstrating great potential for downstream tasks. Inspired by this major trend, we propose ChartGPT, generating charts from abstract natural language inputs. However, LLMs are struggling to address complex logic problems. To enable the model to accurately specify the complex parameters and perform operations in chart generation, we decompose the generation process into a step-by-step reasoning pipeline, so that the model only needs to reason a single and specific sub-task during each run. Moreover, LLMs are pre-trained on general datasets, which might be biased for the task of chart generation. To provide adequate visualization knowledge, we create a dataset consisting of abstract utterances and charts and improve model performance through fine-tuning. We further design an interactive interface for ChartGPT that allows users to check and modify the intermediate outputs of each step. The effectiveness of the proposed system is evaluated through quantitative evaluations and a user study.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Yuan Tian"},{"affiliations":"","email":"","is_corresponding":false,"name":"Weiwei Cui"},{"affiliations":"","email":"","is_corresponding":false,"name":"Dazhen Deng"},{"affiliations":"","email":"","is_corresponding":false,"name":"Xinjing Yi"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yurun Yang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Haidong Zhang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yingcai Wu"}],"award":"","doi":"10.1109/TVCG.2024.3368621","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243368621","image_caption":"ChartGPT overview. ChartGPT takes a data table and an utterance provided by the user as input (a). To generate the chart, ChartGPT employs a step-by-step transformation process (b) that decomposes the chart generation task into six sequential steps (b1). Each step is solved by the LLM fine-tuned on our constructed dataset (b2). 
By leveraging the output from each step, ChartGPT generates visualization specifications and presents charts to the user (c).","keywords":["Natural language interfaces, large language models, data visualization"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/TkBPnodArzQ&t=0h37m47s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243368621/v-tvcg-20243368621_Preview.mp4?token=NNF1tI9DBJHzeUfhANxgTkE_e5Wdnhf03qWc96g5uIc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243368621/v-tvcg-20243368621_Preview.srt?token=JSh5JMzUk880YDcFVfesTjDI-6QZABZFEW1cy5mL7Ps&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full13","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Natural Language and Multimodal Interaction","session_uid":"v-tvcg","session_youtube_ff_id":"aOjbYmdr5Y0","session_youtube_ff_link":"https://youtu.be/aOjbYmdr5Y0","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/TkBPnodArzQ&t=0h37m47s","sessions":["Natural Language and Multimodal Interaction"],"time_stamp":"2024-10-16T16:36:00Z","title":"ChartGPT: Leveraging LLMs to Generate Charts from Abstract Natural Language","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243408255","abstract":"Generative text-to-image models, which allow users to create appealing images through a text prompt, have seen a dramatic increase in popularity in recent years. However, most users have a limited understanding of how such models work and often rely on trial and error strategies to achieve satisfactory results. The prompt history contains a wealth of information that could provide users with insights into what has been explored and how the prompt changes impact the output image, yet little research attention has been paid to the visual analysis of such process to support users. We propose the Image Variant Graph, a novel visual representation designed to support comparing prompt-image pairs and exploring the editing history. The Image Variant Graph models prompt differences as edges between corresponding images and presents the distances between images through projection. Based on the graph, we developed the PrompTHis system through co-design with artists. Based on the review and analysis of the prompting history, users can better understand the impact of prompt changes and have a more effective control of image generation. 
A quantitative user study and qualitative interviews demonstrate that PrompTHis can help users review the prompt history, make sense of the model, and plan their creative process.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Yuhan Guo"},{"affiliations":"","email":"","is_corresponding":false,"name":"Hanning Shao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Can Liu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Kai Xu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Xiaoru Yuan"}],"award":"","doi":"10.1109/TVCG.2024.3408255","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243408255","image_caption":"When using text-to-image generative models, users might spend a lot of time in trials and errors. PrompTHis is a visual interactive system that supports users to understand how the models work through exploring prompt history. It consists of a novel Image Variant Graph presents how specific word modifications affect the model's outputs and a history box that shows the attempts in temporal order. The figure shows the prompting records of an artist. Starting from a black-and-white drawing of city buildings (1-5), the artist experimented with color styles (6-7, 8-10), and returned to the black-and-white style (11-14), with \u201catomic explosion\u201d inserted later (15).","keywords":["Text visualization, image visualization, text-to-image generation, editing history, provenance, generative art"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2403.09615","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/TkBPnodArzQ&t=0h57m27s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243408255/v-tvcg-20243408255_Preview.mp4?token=keSV0ZsZjbxswmhd2djgTyYh3KPJ_LcbgLW7qmu0kRA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243408255/v-tvcg-20243408255_Preview.srt?token=H_KJT2BEr0QXj2brQhZcW1C4mJEGqg1WlEgPGKBSH40&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full13","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Natural Language and Multimodal Interaction","session_uid":"v-tvcg","session_youtube_ff_id":"fMwAACKA6oA","session_youtube_ff_link":"https://youtu.be/fMwAACKA6oA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/TkBPnodArzQ&t=0h57m27s","sessions":["Natural Language and Multimodal Interaction"],"time_stamp":"2024-10-16T17:00:00Z","title":"PrompTHis: Visualizing the Process and Influence of Prompt Editing during Text-to-Image Creation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1067","abstract":"Large Language Models (LLMs) are powerful but also raise significant security concerns, particularly regarding the harm they can cause, such as generating fake news that manipulates public opinion on social media and providing responses to unethical activities. Traditional red teaming approaches for identifying AI vulnerabilities rely on manual prompt construction and expertise. 
This paper introduces AdversaFlow, a novel visual analytics system designed to enhance LLM security against adversarial attacks through human-AI collaboration. AdversaFlow involves adversarial training between a target model and a red model, featuring unique multi-level adversarial flow and fluctuation path visualizations. These features provide insights into adversarial dynamics and LLM robustness, enabling experts to identify and mitigate vulnerabilities effectively. We present quantitative evaluations and case studies validating our system's utility and offering insights for future AI security solutions. Our method can enhance LLM security, supporting downstream scenarios like social media regulation by enabling more effective detection, monitoring, and mitigation of harmful content and behaviors.","accessible_pdf":true,"authors":[{"affiliations":["Zhejiang University, Ningbo, China"],"email":"dengdazhen@zju.edu.cn","is_corresponding":true,"name":"Dazhen Deng"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"zhangchuhan024@163.com","is_corresponding":false,"name":"Chuhan Zhang"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"huawzheng@gmail.com","is_corresponding":false,"name":"Huawei Zheng"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"yw.pu@zju.edu.cn","is_corresponding":false,"name":"Yuwen Pu"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"sji@zju.edu.cn","is_corresponding":false,"name":"Shouling Ji"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"ycwu@zju.edu.cn","is_corresponding":false,"name":"Yingcai Wu"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1067","image_caption":"The interface of AdversaFlow includes a Control Panel (A) to configure model parameters and adjust data sampling, an Embedding View (B) to show the projection of prompts, a Metric Monitor (C) displaying the key performance indicators of the model, an Adversarial Flow to facilitate multi-level exploration of models, an Instance List (E) showing prompt details, and a Flucutaion View (F) for the investigation of token-level uncertainty.","keywords":["Visual Analytics for Machine Learning, Artificial Intelligence Security, Large Language Models, Text Visualization"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/hEywiCiEJO0&t=0h0m54s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1067/v-full-1067_Preview.mp4?token=WdJwebGqoxqRl2J3o3aTwy7jdlbRADEynOHmmZ55jE4&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full14","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Look, Learn, Language Models","session_uid":"v-full","session_youtube_ff_id":"NWnvzefxILM","session_youtube_ff_link":"https://youtu.be/NWnvzefxILM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/hEywiCiEJO0&t=0h0m54s","sessions":["Look, Learn, Language Models"],"time_stamp":"2024-10-18T12:30:00Z","title":"AdversaFlow: Visual Red Teaming for Large Language Models with Multi-Level Adversarial 
Flow","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1096","abstract":"Large Language Models (LLMs) have shown great potential in intelligent visualization systems, especially for domain-specific applications. Integrating LLMs into visualization systems presents challenges, and we categorize these challenges into three alignments: domain problems with LLMs, visualization with LLMs, and interaction with LLMs. To achieve these alignments, we propose a framework and outline a workflow to guide the application of fine-tuned LLMs to enhance visual interactions for domain-specific tasks. These alignment challenges are critical in education because of the need for an intelligent visualization system to support beginners' self-regulated learning. Therefore, we apply the framework to education and introduce Tailor-Mind, an interactive visualization system designed to facilitate self-regulated learning for artificial intelligence beginners. Drawing on insights from a preliminary study, we identify self-regulated learning tasks and fine-tuning objectives to guide visualization design and tuning data construction. Our focus on aligning visualization with fine-tuned LLM makes Tailor-Mind more like a personalized tutor. Tailor-Mind also supports interactive recommendations to help beginners better achieve their learning goals. Model performance evaluations and user studies confirm that Tailor-Mind improves the self-regulated learning experience, effectively validating the proposed framework.","accessible_pdf":false,"authors":[{"affiliations":["Fudan University, Shanghai, China"],"email":"lgao.lynne@gmail.com","is_corresponding":true,"name":"Lin Gao"},{"affiliations":["Fudan University, Shanghai, China"],"email":"kingluther6666@gmail.com","is_corresponding":false,"name":"Jing Lu"},{"affiliations":["Fudan University, Shanghai, China"],"email":"gemini25szk@gmail.com","is_corresponding":false,"name":"Zekai Shao"},{"affiliations":["Fudan University, Shanghai, China"],"email":"ziyuelin917@gmail.com","is_corresponding":false,"name":"Ziyue Lin"},{"affiliations":["Fudan University, Shanghai, China"],"email":"sbyue23@m.fudan.edu.cn","is_corresponding":false,"name":"Shengbin Yue"},{"affiliations":["Fudan University, Shanghai, China"],"email":"chiokit0819@gmail.com","is_corresponding":false,"name":"Chiokit Ieong"},{"affiliations":["Fudan University, Shanghai, China"],"email":"21307130094@m.fudan.edu.cn","is_corresponding":false,"name":"Yi Sun"},{"affiliations":["University of Vienna, Vienna, Austria"],"email":"rory.james.zauner@univie.ac.at","is_corresponding":false,"name":"Rory Zauner"},{"affiliations":["Fudan University, Shanghai, China"],"email":"zywei@fudan.edu.cn","is_corresponding":false,"name":"Zhongyu Wei"},{"affiliations":["Fudan University, Shanghai, China"],"email":"simingchen3@gmail.com","is_corresponding":false,"name":"Siming Chen"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1096","image_caption":"In applying the workflow to Self-Regulated Learning (SRL) in education, we outline the process in three phases. Phase 1 involves establishing a fundamental understanding of the SRL task (A1) and collecting data on artificial intelligence (A2). The design requirements (B) align with the design requirements. Phase 2 details the SRL pipeline sub-tasks and visualizations (C1), leading to the creation of fine-tuning data (C2). 
In phase 3, we enhance the fine-tuning effects and visualization interactions by integrating user feedback within the visualization system. ","keywords":["Fine-tuned large language model, visualization system, self-regulated learning, intelligent tutorial system"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.20570","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/hEywiCiEJO0&t=0h27m47s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1096/v-full-1096_Preview.mp4?token=SgWKRslsK3PCv0V7DHhhG02KJ8saIlA87RUWJ-i9jKg&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full14","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Look, Learn, Language Models","session_uid":"v-full","session_youtube_ff_id":"KR_r6ARzzx0","session_youtube_ff_link":"https://youtu.be/KR_r6ARzzx0","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/hEywiCiEJO0&t=0h27m47s","sessions":["Look, Learn, Language Models"],"time_stamp":"2024-10-18T12:54:00Z","title":"Fine-Tuned Large Language Model for Visualization System: A Study on Self-Regulated Learning in Education","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1193","abstract":"Emerging multimodal large language models (MLLMs) exhibit great potential for chart question answering (CQA). Recent efforts primarily focus on scaling up training datasets (i.e., charts, data tables, and question-answer (QA) pairs) through data collection and synthesis. However, our empirical study on existing MLLMs and CQA datasets reveals notable gaps. First, current data collection and synthesis focus on data volume and lack consideration of fine-grained visual encodings and QA tasks, resulting in unbalanced data distribution divergent from practical CQA scenarios. Second, existing work follows the training recipe of the base MLLMs initially designed for natural images, under-exploring the adaptation to unique chart characteristics, such as rich text elements. To fill the gap, we propose a visualization-referenced instruction tuning approach to guide the training dataset enhancement and model development. Specifically, we propose a novel data engine to effectively filter diverse and high-quality data from existing datasets and subsequently refine and augment the data using LLM-based generation techniques to better align with practical QA tasks and visual encodings. Then, to facilitate the adaptation to chart characteristics, we utilize the enriched data to train an MLLM by unfreezing the vision encoder and incorporating a mixture-of-resolution adaptation strategy for enhanced fine-grained recognition. Experimental results validate the effectiveness of our approach. Even with fewer training examples, our model consistently outperforms state-of-the-art CQA models on established benchmarks. We also contribute a dataset split as a benchmark for future research. 
Source codes and datasets of this paper are available at https://github.com/zengxingchen/ChartQA-MLLM.","accessible_pdf":false,"authors":[{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"],"email":"xingchen.zeng@outlook.com","is_corresponding":true,"name":"Xingchen Zeng"},{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"],"email":"hlin386@connect.hkust-gz.edu.cn","is_corresponding":false,"name":"Haichuan Lin"},{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"],"email":"yyebd@connect.ust.hk","is_corresponding":false,"name":"Yilin Ye"},{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China","The Hong Kong University of Science and Technology, Hong Kong SAR, China"],"email":"weizeng@hkust-gz.edu.cn","is_corresponding":false,"name":"Wei Zeng"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1193","image_caption":"Comparison of our model with state-of-the-art MLLMs on chart question answering. Existing MLLMs often fail to understand visual mappings, such as inverted Y-axis, truncated axis, bubble sizing, and area stacking. In contrast, our model, trained with the visualization-referenced dataset we constructed, showcases a better understanding of visualization domain knowledge.","keywords":["Chart-question answering, multimodal large language models, benchmark"],"open_access_supplemental_link":"https://github.com/zengxingchen/ChartQA-MLLM","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.20174","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/hEywiCiEJO0&t=0h40m31s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1193/v-full-1193_Preview.mp4?token=g7956VuPuX9qXmOO9QhCifFhYpHqQxRR8sdHM3xSlXQ&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1193/v-full-1193_Preview.srt?token=3nmqQ4mX2ixqqnQRhaSQNz-6xKRYFtzLC9_Vv-EOzcg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full14","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Look, Learn, Language Models","session_uid":"v-full","session_youtube_ff_id":"fiE38Zyk9VY","session_youtube_ff_link":"https://youtu.be/fiE38Zyk9VY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/hEywiCiEJO0&t=0h40m31s","sessions":["Look, Learn, Language Models"],"time_stamp":"2024-10-18T13:06:00Z","title":"Advancing Multimodal Large Language Models in Chart Question Answering with Visualization-Referenced Instruction Tuning","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1326","abstract":"Evaluating large language models (LLMs) presents unique challenges. While automatic side-by-side evaluation, also known as LLM-as-a-judge, has become a promising solution, model developers and researchers face difficulties with scalability and interpretability when analyzing these evaluation outcomes. To address these challenges, we introduce LLM Comparator, a new visual analytics tool designed for side-by-side evaluations of LLMs. 
This tool provides analytical workflows that help users understand when and why one LLM outperforms or underperforms another, and how their responses differ. Through close collaboration with practitioners developing LLMs at Google, we have iteratively designed, developed, and refined the tool. Qualitative feedback from these users highlights that the tool facilitates in-depth analysis of individual examples while enabling users to visually overview and flexibly slice data. This empowers users to identify undesirable patterns, formulate hypotheses about model behavior, and gain insights for model improvement. LLM Comparator has been integrated into Google's LLM evaluation platforms and open-sourced.","accessible_pdf":false,"authors":[{"affiliations":["Google, Atlanta, United States"],"email":"minsuk.kahng@gmail.com","is_corresponding":true,"name":"Minsuk Kahng"},{"affiliations":["Google Research, Seattle, United States"],"email":"iftenney@google.com","is_corresponding":false,"name":"Ian Tenney"},{"affiliations":["Google Research, Cambridge, United States"],"email":"mahimap@google.com","is_corresponding":false,"name":"Mahima Pushkarna"},{"affiliations":["Google Research, Pittsburgh, United States"],"email":"lxieyang.cmu@gmail.com","is_corresponding":false,"name":"Michael Xieyang Liu"},{"affiliations":["Google Research, Cambridge, United States"],"email":"jwexler@google.com","is_corresponding":false,"name":"James Wexler"},{"affiliations":["Google, Cambridge, United States"],"email":"ereif@google.com","is_corresponding":false,"name":"Emily Reif"},{"affiliations":["Google Research, Mountain View, United States"],"email":"kallarackal@google.com","is_corresponding":false,"name":"Krystal Kallarackal"},{"affiliations":["Google Research, Seattle, United States"],"email":"minsuk.cs@gmail.com","is_corresponding":false,"name":"Minsuk Chang"},{"affiliations":["Google, Cambridge, United States"],"email":"michaelterry@google.com","is_corresponding":false,"name":"Michael Terry"},{"affiliations":["Google, Paris, France"],"email":"ldixon@google.com","is_corresponding":false,"name":"Lucas Dixon"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1326","image_caption":"LLM Comparator is a visual analytics tool consisting of multiple views: an interactive table which displays individual prompts and model responses, and a visualization summary which comprises multiple panels, including score distribution, metrics by prompt category, rationale clusters, n-grams, and custom functions.","keywords":["Visual analytics, large language models, model evaluation, responsible AI, machine learning interpretability."],"open_access_supplemental_link":"https://github.com/PAIR-code/llm-comparator","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full 
Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/hEywiCiEJO0&t=0h14m53s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1326/v-full-1326_Preview.mp4?token=GYD8fWb2Fu3OrvKSiArk2XzM-LoMhtGk3WbHqeN0LKM&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1326/v-full-1326_Preview.srt?token=TQ8-neIoCwJpayqgGoorO3YdwC5UL97fg-L_QNP5Qr0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full14","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Look, Learn, Language Models","session_uid":"v-full","session_youtube_ff_id":"DVHN9srNTkk","session_youtube_ff_link":"https://youtu.be/DVHN9srNTkk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/hEywiCiEJO0&t=0h14m53s","sessions":["Look, Learn, Language Models"],"time_stamp":"2024-10-18T12:42:00Z","title":"LLM Comparator: Interactive Analysis of Side-by-Side Evaluation of Large Language Models","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1503","abstract":"The increasing reliance on Large Language Models (LLMs) for health information seeking can pose severe risks due to the potential for misinformation and the complexity of these topics. This paper introduces KnowNet, a visualization system that integrates LLMs with Knowledge Graphs (KG) to provide enhanced accuracy and structured exploration. One core idea in KnowNet is to conceptualize the understanding of a subject as the gradual construction of graph visualization, aligning the user's cognitive process with both the structured data in KGs and the unstructured outputs from LLMs. Specifically, we extracted triples (e.g., entities and their relations) from LLM outputs and mapped them into the validated information and supported evidence in external KGs. Based on the neighborhood of the currently explored entities in KGs, KnowNet provides recommendations for further inquiry, aiming to guide a comprehensive understanding without overlooking critical aspects. A progressive graph visualization is proposed to show the alignment between LLMs and KGs, track previous inquiries, and connect this history with current queries and next-step recommendations. 
We demonstrate the effectiveness of our system via use cases and expert interviews.","accessible_pdf":false,"authors":[{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"yan00111@umn.edu","is_corresponding":false,"name":"Youfu Yan"},{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"hou00127@umn.edu","is_corresponding":false,"name":"Yu Hou"},{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"xiao0290@umn.edu","is_corresponding":false,"name":"Yongkang Xiao"},{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"zhan1386@umn.edu","is_corresponding":false,"name":"Rui Zhang"},{"affiliations":["University of Minnesota, Minneapolis , United States"],"email":"qianwen@umn.edu","is_corresponding":true,"name":"Qianwen Wang"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1503","image_caption":"In contrast to traditional LLM question-answering, which often generate lengthy and unverified text, KNOWNET leverages external knowledge graph (KG) to enhance health information seeking with LLM. KNOWNET provides validation through literature for accuracy, next-step recommendations for comprehensive exploration, and step-by-step graph visualization for a progressive understanding of the topic.","keywords":["Human-AI interactions, knowledge graph, conversational agent, large language model, progressive visualization"],"open_access_supplemental_link":"https://visual-intelligence-umn.github.io/KNOWNET/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.13598","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/hEywiCiEJO0&t=0h52m25s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1503/v-full-1503_Preview.mp4?token=DCKH1_qnxfb-cM6f17TTxp3U6FmGK7tYd4K1adE1Oy8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1503/v-full-1503_Preview.srt?token=qNHGukqjokAVMKtDPxeimr-sum2x_sotCz1_PJ6Vf2s&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full14","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Look, Learn, Language Models","session_uid":"v-full","session_youtube_ff_id":"_eV967qYScs","session_youtube_ff_link":"https://youtu.be/_eV967qYScs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/hEywiCiEJO0&t=0h52m25s","sessions":["Look, Learn, Language Models"],"time_stamp":"2024-10-18T13:30:00Z","title":"Guided Health-related Information Seeking from LLMs via Knowledge Graph Integration","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1544","abstract":"Large Language Models (LLMs) have been adopted for a variety of visualizations tasks, but how far are we from perceptually aware LLMs that can predict human takeaways? Graphical perception literature has shown that human chart takeaways are sensitive to visualization design choices, such as spatial layouts. In this work, we examine the extent to which LLMs exhibit such sensitivity when generating takeaways, using bar charts with varying spatial layouts as a case study. 
We conducted three experiments and tested four common bar chart layouts: vertically juxtaposed, horizontally juxtaposed, overlaid, and stacked. In Experiment 1, we identified the optimal configurations to generate meaningful chart takeaways by testing four LLMs, two temperature settings, nine chart specifications, and two prompting strategies. We found that even state-of-the-art LLMs struggled to generate semantically diverse and factually accurate takeaways. In Experiment 2, we used the optimal configurations to generate 30 chart takeaways each for eight visualizations across four layouts and two datasets in both zero-shot and one-shot settings. Compared to human takeaways, we found that the takeaways LLMs generated often did not match the types of comparisons made by humans. In Experiment 3, we examined the effect of chart context and data on LLM takeaways. We found that LLMs, unlike humans, exhibited variation in takeaway comparison types for different bar charts using the same bar layout. Overall, our case study evaluates the ability of LLMs to emulate human interpretations of data and points to challenges and opportunities in using LLMs to predict human chart takeaways.","accessible_pdf":false,"authors":[{"affiliations":["University of Washington, Seattle, United States"],"email":"wwill@cs.washington.edu","is_corresponding":true,"name":"Huichen Will Wang"},{"affiliations":["Adobe Research, Seattle, United States"],"email":"jhoffs@adobe.com","is_corresponding":false,"name":"Jane Hoffswell"},{"affiliations":["University of Massachusetts Amherst, Amherst, United States"],"email":"yukithane@gmail.com","is_corresponding":false,"name":"Sao Myat Thazin Thane"},{"affiliations":["Adobe Research, San Jose, United States"],"email":"victorbursztyn2022@u.northwestern.edu","is_corresponding":false,"name":"Victor S. Bursztyn"},{"affiliations":["Georgia Tech, Atlanta, United States"],"email":"cxiong@gatech.edu","is_corresponding":false,"name":"Cindy Xiong Bearfield"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1544","image_caption":"There is a discrepancy between human chart takeaways and predictions of human chart takeaways generated by large language models. 
For a chart that shows the prices of three drinks in two bars, a human would tend to compare the prices of Drink 2 between the two bars, but the model predicts a human to compare the prices of the three drinks in Bar B.","keywords":["Visualization, Graphical Perception, Large Language Models"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/hEywiCiEJO0&t=0h52m20s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1544/v-full-1544_Preview.mp4?token=xASKJTPbGCrNglAxnnLQVlf3Wmp5QMpznTvs0msc0MA&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full14","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Look, Learn, Language Models","session_uid":"v-full","session_youtube_ff_id":"L_tj96AoLnI","session_youtube_ff_link":"https://youtu.be/L_tj96AoLnI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/hEywiCiEJO0&t=0h52m20s","sessions":["Look, Learn, Language Models"],"time_stamp":"2024-10-18T13:18:00Z","title":"How Aligned are Human Chart Takeaways and LLM Predictions? A Case Study on Bar Charts with Varying Layouts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1533","abstract":"We introduce DiffFit, a differentiable algorithm for fitting protein atomistic structures into an experimental reconstructed Cryo-Electron Microscopy (cryo-EM) volume map. In structural biology, this process is necessary to semi-automatically composite large mesoscale models of complex protein assemblies and complete cellular structures that are based on measured cryo-EM data. The current approaches require manual fitting in three dimensions to start, resulting in approximately aligned structures followed by an automated fine-tuning of the alignment. The DiffFit approach enables domain scientists to fit new structures automatically and visualize the results for inspection and interactive revision. The fitting begins with differentiable three-dimensional (3D) rigid transformations of the protein atom coordinates followed by sampling the density values at the atom coordinates from the target cryo-EM volume. To ensure a meaningful correlation between the sampled densities and the protein structure, we proposed a novel loss function based on a multi-resolution volume-array approach and the exploitation of the negative space. This loss function serves as a critical metric for assessing the fitting quality, ensuring the fitting accuracy and an improved visualization of the results. We assessed the placement quality of DiffFit with several large, realistic datasets and found it to be superior to that of previous methods. We further evaluated our method in two use cases: automating the integration of known composite structures into larger protein complexes and facilitating the fitting of predicted protein domains into volume densities to aid researchers in identifying unknown proteins. We implemented our algorithm as an open-source plugin (github.com/nanovis/DiffFitViewer) in ChimeraX, a leading visualization software in the field. 
All supplemental materials are available at osf.io/5tx4q.","accessible_pdf":false,"authors":[{"affiliations":["King Abdullah University of Science and Technology, Thuwal, Saudi Arabia"],"email":"deng.luo@kaust.edu.sa","is_corresponding":true,"name":"Deng Luo"},{"affiliations":["King Abdullah University of Science and Technology, Thuwal, Saudi Arabia"],"email":"zainab.alsuwaykit@kaust.edu.sa","is_corresponding":false,"name":"Zainab Alsuwaykit"},{"affiliations":["King Abdullah University of Science and Technology, Thuwal, Saudi Arabia"],"email":"dawar.khan@kaust.edu.sa","is_corresponding":false,"name":"Dawar Khan"},{"affiliations":["King Abdullah University of Science and Technology, Thuwal, Saudi Arabia"],"email":"ondrej.strnad@kaust.edu.sa","is_corresponding":false,"name":"Ond\u0159ej Strnad"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"tobias.isenberg@gmail.com","is_corresponding":false,"name":"Tobias Isenberg"},{"affiliations":["King Abdullah University of Science and Technology, Thuwal, Saudi Arabia"],"email":"ivan.viola@kaust.edu.sa","is_corresponding":false,"name":"Ivan Viola"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1533","image_caption":"DiffFit workflow. The target cryo-EM volume and the structures to be fit on the top left serve as inputs, which are passed into the novel volume processing, followed by the differentiable fitting algorithm. The fitting results are then clustered and inspected by the expert. The expert may zero out voxels corresponding to the placed structures and feed the map back iteratively as input for a new fitting round until the compositing is done.","keywords":["Scalar field data, algorithms, application-motivated visualization, process/workflow design, life sciences, health, medicine, biology, structural biology, bioinformatics, genomics, cryo-EM"],"open_access_supplemental_link":"https://osf.io/5tx4q/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2404.02465","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Za3820yadmE&t=0h0m24s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1533/v-full-1533_Preview.mp4?token=V8OcgojfOjgG9iatxgIF8UGOAImc5hoAlvO4BVA5RCg&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1533/v-full-1533_Preview.srt?token=YiBU9ZXJyfd4EKHrfRQNns5cPKwO7m2mkW2OnbcRTGg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full15","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Biological Data Visualization","session_uid":"v-full","session_youtube_ff_id":"ptmTip8km8k","session_youtube_ff_link":"https://youtu.be/ptmTip8km8k","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Za3820yadmE&t=0h0m24s","sessions":["Biological Data Visualization"],"time_stamp":"2024-10-16T14:15:00Z","title":"DiffFit: Visually-Guided Differentiable Fitting of Molecule Structures to a Cryo-EM Map","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1597","abstract":"In understanding and redesigning the function of proteins in modern biochemistry, protein engineers are increasingly focusing on exploring regions in 
proteins called loops. Analyzing various characteristics of these regions helps the experts design the transfer of the desired function from one protein to another. This process is denoted as loop grafting. We designed a set of interactive visualizations that provide experts with visual support through all the loop grafting pipeline steps. The workflow is divided into several phases, reflecting the steps of the pipeline. Each phase is supported by a specific set of abstracted 2D visual representations of proteins and their loops that are interactively linked with the 3D View of proteins. By sequentially passing through the individual phases, the user shapes the list of loops that are potential candidates for loop grafting. Finally, the actual in-silico insertion of the loop candidates from one protein to the other is performed, and the results are visually presented to the user. In this way, the fully computational rational design of proteins and their loops results in newly designed protein structures that can be further assembled and tested through in-vitro experiments. We showcase the contribution of our visual support design on a real case scenario changing the enantiomer selectivity of the engineered enzyme. Moreover, we provide the readers with the experts\u2019 feedback.","accessible_pdf":false,"authors":[{"affiliations":["Faculty of Informatics, Masaryk University, Brno, Czech Republic"],"email":"kiraa@mail.muni.cz","is_corresponding":true,"name":"Filip Op\u00e1len\u00fd"},{"affiliations":["Faculty of Informatics, Masaryk University, Brno, Czech Republic"],"email":"paloulbrich@gmail.com","is_corresponding":false,"name":"Pavol Ulbrich"},{"affiliations":["Masaryk University, Brno, Czech Republic","St. Anne\u2019s University Hospital, Brno, Czech Republic"],"email":"joan.planas@mail.muni.cz","is_corresponding":false,"name":"Joan Planas-Iglesias"},{"affiliations":["Faculty of Informatics, Masaryk University, Brno, Czech Republic","University of Bergen, Bergen, Norway"],"email":"xbyska@fi.muni.cz","is_corresponding":false,"name":"Jan By\u0161ka"},{"affiliations":["Masaryk University, Brno, Czech Republic","St. Anne\u2019s University Hospital, Brno, Czech Republic"],"email":"stourac.jan@gmail.com","is_corresponding":false,"name":"Jan \u0160toura\u010d"},{"affiliations":["Faculty of Science, Masaryk University, Brno, Czech Republic","St. Anne\u2019s University Hospital Brno, Brno, Czech Republic"],"email":"222755@mail.muni.cz","is_corresponding":false,"name":"David Bedn\u00e1\u0159"},{"affiliations":["Faculty of Informatics, Masaryk University, Brno, Czech Republic"],"email":"katarina.furmanova@gmail.com","is_corresponding":false,"name":"Katar\u00edna Furmanov\u00e1"},{"affiliations":["Masaryk University, Brno, Czech Republic"],"email":"kozlikova@fi.muni.cz","is_corresponding":false,"name":"Barbora Kozlikova"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1597","image_caption":"Protein engineers are focusing on protein loops to design novel proteins through a process called loop grafting. This involves transferring loops to transfer some desired functions from one protein to another. This paper introduces a set of interactive visualizations that support experts throughout the loop grafting pipeline. The workflow is divided into phases, each with specific 2D and 3D visual representations of proteins and their loops. 
With the aid of these visualizations, users iteratively identify potential loop candidates before performing an in-silico loop grafting and visualizing the results. The approach was validated with an expert case study, demonstrating its effectiveness.","keywords":["Protein visualization, protein engineering, loop grafting, abstract views"],"open_access_supplemental_link":"https://gitlab.fi.muni.cz/visitlab/loopgrafter-frontend-1.2","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.20054","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Za3820yadmE&t=1h1m19s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1597/v-full-1597_Preview.mp4?token=dqG5O6MdgNjF_Kd8T_AnVvxZsuj2zZfem9zMErGW_OM&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1597/v-full-1597_Preview.srt?token=pfEKxAwTz0x9qiWxPObe6aSIrQZvBrkeGs6OdI0iXL0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full15","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Biological Data Visualization","session_uid":"v-full","session_youtube_ff_id":"TjB6UTqQMHc","session_youtube_ff_link":"https://youtu.be/TjB6UTqQMHc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Za3820yadmE&t=1h1m19s","sessions":["Biological Data Visualization"],"time_stamp":"2024-10-16T15:15:00Z","title":"Visual Support for the Loop Grafting Workflow on Proteins","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1615","abstract":"We present Cell2Cell, a novel visual analytics approach for quantifying and visualizing networks of cell-cell interactions in three-dimensional (3D) multi-channel cancerous tissue data. By analyzing cellular interactions, biomedical experts can gain a more accurate understanding of the intricate relationships between cancer and immune cells. Recent methods have focused on inferring interaction based on the proximity of cells in low-resolution 2D multi-channel imaging data. By contrast, we analyze cell interactions by quantifying the presence and levels of specific proteins within a tissue sample (protein expressions) extracted from high-resolution 3D multi-channel volume data. Such analyses have a strong exploratory nature and require a tight integration of domain experts in the analysis loop to leverage their deep knowledge. We propose two complementary semi-automated approaches to cope with the increasing size and complexity of the data interactively: On the one hand, we interpret cell-to-cell interactions as edges in a cell graph and analyze the image signal (protein expressions) along those edges, using spatial as well as abstract visualizations. Complementarily, we propose a cell-centered approach, enabling scientists to visually analyze polarized distributions of proteins in three dimensions, which also captures neighboring cells with biochemical and cell biological consequences. We evaluate our application in three case studies, where biologists and medical experts use Cell2Cell to investigate tumor micro-environments to identify and quantify T-cell activation in human tissue data. 
We confirmed that our tool can fully solve the use cases and enables a streamlined and detailed analysis of cell-cell interactions.","accessible_pdf":false,"authors":[{"affiliations":["Harvard Medical School, Boston, United States"],"email":"eric.moerth@gmx.at","is_corresponding":true,"name":"Eric M\u00f6rth"},{"affiliations":["University of Vienna, Vienna, Austria"],"email":"kevin.sidak@univie.ac.at","is_corresponding":false,"name":"Kevin Sidak"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"zoltan_maliga@hms.harvard.edu","is_corresponding":false,"name":"Zoltan Maliga"},{"affiliations":["University of Vienna, Vienna, Austria"],"email":"torsten.moeller@univie.ac.at","is_corresponding":false,"name":"Torsten M\u00f6ller"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"nils@hms.harvard.edu","is_corresponding":false,"name":"Nils Gehlenborg"},{"affiliations":["Harvard University, Cambridge, United States"],"email":"peter_sorger@hms.harvard.edu","is_corresponding":false,"name":"Peter Sorger"},{"affiliations":["Harvard University, Cambridge, United States"],"email":"pfister@seas.harvard.edu","is_corresponding":false,"name":"Hanspeter Pfister"},{"affiliations":["Harvard University, Cambridge, United States"],"email":"jbeyer@g.harvard.edu","is_corresponding":false,"name":"Johanna Beyer"},{"affiliations":["New York University, New York, United States","Harvard University, Boston, United States"],"email":"rk4815@nyu.edu","is_corresponding":false,"name":"Robert Kr\u00fcger"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1615","image_caption":"Cell2Cell is a web-based visual analytics system to analyze interactions of cells in 3D biological tissue imaging data. a) Multi-volume viewer using pseudo-colors. The embedded interaction graph displays cells (nodes) and their interactions (edges). b) Cell interaction profiles show the spatial intensity distribution of protein markers between cells. c) Multiple interactions can be compared channel by channel. d) Heatmaps (overview) and line charts (details) can be toggled on demand. e) Radial polarization charts enable cell-centric analysis. 
f) The side panel allows users to customize color settings and (de)activate channels.","keywords":["Biomedical visualization, 3D multi-channel tissue data, Direct volume rendering, Quantitative analysis"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/axy82","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Za3820yadmE&t=0h48m12s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1615/v-full-1615_Preview.mp4?token=3texbGZWm2_V72b-KPt0rGFoMxv76gsFkNDOwCnQ1FE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1615/v-full-1615_Preview.srt?token=aX17wdDATdRzBDANHUFadpD1V288PgtmL8VMdidF2O8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full15","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Biological Data Visualization","session_uid":"v-full","session_youtube_ff_id":"wVBlWgy1Gd8","session_youtube_ff_link":"https://youtu.be/wVBlWgy1Gd8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Za3820yadmE&t=0h48m12s","sessions":["Biological Data Visualization"],"time_stamp":"2024-10-16T15:03:00Z","title":"Cell2Cell: Explorative Cell Interaction Analysis in Multi-Volumetric Tissue Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233337642","abstract":"Molecular docking is a key technique in various fields like structural biology, medicinal chemistry, and biotechnology. It is widely used for virtual screening during drug discovery, computer-assisted drug design, and protein engineering. A general molecular docking process consists of the target and ligand selection, their preparation, and the docking process itself, followed by the evaluation of the results. However, the most commonly used docking software provides no or very basic evaluation possibilities. Scripting and external molecular viewers are often used, which are not designed for an efficient analysis of docking results. Therefore, we developed InVADo, a comprehensive interactive visual analysis tool for large docking data. It consists of multiple linked 2D and 3D views. It filters and spatially clusters the data, and enriches it with post-docking analysis results of protein-ligand interactions and functional groups, to enable well-founded decision-making. In an exemplary case study, domain experts confirmed that InVADo facilitates and accelerates the analysis workflow. They rated it as a convenient, comprehensive, and feature-rich tool, especially useful for virtual screening.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Marco Sch\u00e4fer"},{"affiliations":"","email":"","is_corresponding":false,"name":"Nicolas Brich"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jan By\u0161ka"},{"affiliations":"","email":"","is_corresponding":false,"name":"S\u00e9rgio M. 
Marques"},{"affiliations":"","email":"","is_corresponding":false,"name":"David Bedn\u00e1\u0159"},{"affiliations":"","email":"","is_corresponding":false,"name":"Philipp Thiel"},{"affiliations":"","email":"","is_corresponding":false,"name":"Barbora Kozl\u00edkov\u00e1"},{"affiliations":"","email":"","is_corresponding":true,"name":"Michael Krone"}],"award":"","doi":"10.1109/TVCG.2023.3337642","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233337642","image_caption":"InVADo (Interactive Visual Analysis of Molecular Docking Data) is a visual analytics tool for molecular docking data. It allows users to interactively rank, filter, and cluster the docked compounds and offers a combination of linked 3D and 2D views providing information about the spatial arrangement of the molecules, the type of interaction, or propensities for certain functional groups. The goal of the exploratory visual analysis approach supported by InVADo is to support drug design and similar biochemical applications. ","keywords":["Molecular Docking, AutoDock, Virtual Screening, Visual Analysis, Visualization, Clustering, Protein-Ligand Interaction."],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Za3820yadmE&t=0h23m15s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233337642/v-tvcg-20233337642_Preview.mp4?token=rfeTvNDsaMGzJH6vUQuVUxJMeGbCJpnLMVqOySvUdDU&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full15","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Biological Data Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"AGsPOoexonM","session_youtube_ff_link":"https://youtu.be/AGsPOoexonM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Za3820yadmE&t=0h23m15s","sessions":["Biological Data Visualization"],"time_stamp":"2024-10-16T14:39:00Z","title":"InVADo: Interactive Visual Analysis of Molecular Docking Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243385118","abstract":"Genomics is at the core of precision medicine, and there are high expectations on genomics-enabled improvement of patient outcomes in the years to come. Around the world, initiatives to increase the use of DNA sequencing in clinical routine are being deployed, such as the use of broad panels in the standard care for oncology patients. Such a development comes at the cost of increased demands on throughput in genomic data analysis. In this paper, we use the task of copy number variant (CNV) analysis as a context for exploring visualization concepts for clinical genomics. CNV calls are generated algorithmically, but time-consuming manual intervention is needed to separate relevant findings from irrelevant ones in the resulting large call candidate lists. We present a visualization environment, named Copycat, to support this review task in a clinical scenario. Key components are a scatter-glyph plot replacing the traditional list visualization, and a glyph representation designed for at-a-glance relevance assessments. 
Moreover, we present results from a formative evaluation of the prototype by domain specialists, from which we elicit insights to guide both prototype improvements and visualization for clinical genomics in general.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Emilia St\u00e5hlbom"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jesper Molin"},{"affiliations":"","email":"","is_corresponding":false,"name":"Claes Lundstr\u00f6m"},{"affiliations":"","email":"","is_corresponding":false,"name":"Anders Ynnerman"}],"award":"","doi":"10.1109/TVCG.2024.3385118","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243385118","image_caption":"We created a visualization environment for reviewing genomics data in clinical settings, specifically aimed at review of structural variation. The design utilizes the visual space through a scatter-glyph plot, and supports an iterative workflow with overview first and details on demand. The position and the three parts of the glyph encode the most important information, and each part of the glyph is designed to utilize a unique visual information channel, minimizing interference and allowing for at-a-glance evaluation of each glyph.","keywords":["Visualization, genomics, copy number variants, clinical decision support, evaluation"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Za3820yadmE&t=0h36m14s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243385118/v-tvcg-20243385118_Preview.mp4?token=wAnNZDc4qcgxf5ZCybmQqWXFTwYRvvAy0ATyhxuTuL0&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full15","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Biological Data Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"AHFJlbQhYVA","session_youtube_ff_link":"https://youtu.be/AHFJlbQhYVA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Za3820yadmE&t=0h36m14s","sessions":["Biological Data Visualization"],"time_stamp":"2024-10-16T14:51:00Z","title":"Visualization for diagnostic review of copy number variants in complex DNA sequencing data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243411786","abstract":"We present a novel method for the interactive construction and rendering of extremely large molecular scenes, capable of representing multiple biological cells in atomistic detail. Our method is tailored for scenes, which are procedurally constructed, based on a given set of building rules. Rendering of large scenes normally requires the entire scene available in-core, or alternatively, it requires out-of-core management to load data into the memory hierarchy as a part of the rendering loop. Instead of out-of-core memory management, we propose to procedurally generate the scene on-demand on the fly. The key idea is a positional- and view-dependent procedural scene-construction strategy, where only a fraction of the atomistic scene around the camera is available in the GPU memory at any given time. 
The atomistic detail is populated into a uniform-space partitioning using a grid that covers the entire scene. Most of the grid cells are not filled with geometry; only those that are potentially seen by the camera are populated. The atomistic detail is populated in a compute shader and its representation is connected with acceleration data structures for hardware ray-tracing of modern GPUs. Objects which are far away, where atomistic detail is not perceivable from a given viewpoint, are represented by a triangle mesh mapped with a seamless texture, generated from the rendering of geometry from atomistic detail. The algorithm consists of two pipelines, the construction-compute pipeline, and the rendering pipeline, which work together to render molecular scenes at an atomistic resolution far beyond the limit of the GPU memory containing trillions of atoms. We demonstrate our technique on multiple models of SARS-CoV-2 and the red blood cell.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Ruwayda Alharbi"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ond\u0159ej Strnad"},{"affiliations":"","email":"","is_corresponding":false,"name":"Tobias Klein"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ivan Viola"}],"award":"","doi":"10.1109/TVCG.2024.3411786","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243411786","image_caption":"Several populated SARS-CoV-2 virions over red blood cell particles. The fully textured proxy geometries with partially populated atomistic details are presented in the top-left part, whereas the bottom-right part showcases the continuous Wang tiling used for placement of atomistic details.","keywords":["Interactive rendering, view-guided scene construction, biological data, hardware ray tracing"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2204.05762","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Za3820yadmE&t=0h10m16s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243411786/v-tvcg-20243411786_Preview.mp4?token=Sfz1lUTn5a3NOZ0Ph_-sj5NUxB6j2ZFi0aFaci4A1-8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243411786/v-tvcg-20243411786_Preview.srt?token=qaKnYH41-gSfCdIe9N907yzyt99YIX9TAQx23ahNJIo&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full15","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Biological Data Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"e377VOBXmUw","session_youtube_ff_link":"https://youtu.be/e377VOBXmUw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Za3820yadmE&t=0h10m16s","sessions":["Biological Data Visualization"],"time_stamp":"2024-10-16T14:27:00Z","title":"\u201cNanomatrix: Scalable Construction of Crowded Biological Environments\u201d","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1150","abstract":"Composite visualization represents a widely embraced design that combines multiple visual representations to create an integrated view. However, the traditional approach of creating composite visualizations in immersive environments 
typically occurs asynchronously outside of the immersive space and is carried out by experienced experts. In this work, we aim to empower users to participate in the creation of composite visualization within immersive environments through embodied interactions. This could provide a flexible and fluid experience with immersive visualization and has the potential to facilitate understanding of the relationship between visualization views. We begin with developing a design space of embodied interactions to create various types of composite visualizations with the consideration of data relationships. Drawing inspiration from people's natural experience of manipulating physical objects, we design interactions based on the combination of 3D manipulations in immersive environments. Building upon the design space, we present a series of case studies showcasing the interaction to create different kinds of composite visualizations in virtual reality. Subsequently, we conduct a user study to evaluate the usability of the derived interaction techniques and user experience of creating composite visualizations through embodied interactions. We find that empowering users to participate in composite visualizations through embodied interactions enables them to flexibly leverage different visualization views for understanding and communicating the relationships between different views, which underscores the potential of several future application scenarios.","accessible_pdf":true,"authors":[{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China","The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"qzhual@connect.ust.hk","is_corresponding":false,"name":"Qian Zhu"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States","Georgia Institute of Technology, Atlanta, United States"],"email":"luttul@umich.edu","is_corresponding":false,"name":"Tao Lu"},{"affiliations":["Adobe Research, San Jose, United States","Adobe Research, San Jose, United States"],"email":"sguo@adobe.com","is_corresponding":false,"name":"Shunan Guo"},{"affiliations":["Hong Kong University of Science and Technology, Hong Kong, Hong Kong","Hong Kong University of Science and Technology, Hong Kong, Hong Kong"],"email":"mxj@cse.ust.hk","is_corresponding":false,"name":"Xiaojuan Ma"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States","Georgia Institute of Technology, Atlanta, United States"],"email":"yalongyang@hotmail.com","is_corresponding":true,"name":"Yalong Yang"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1150","image_caption":"This image shows the five cases that represent the idea of our paper: Using embodied interaction to create composite visualization in immersive environments.","keywords":["Composite Visualization, Immersive Analytics, Embodied Interaction"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full 
Paper","preprint_link":"https://arxiv.org/abs/2408.02240","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/ARyWz3510nk&t=0h0m56s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1150/v-full-1150_Preview.mp4?token=3zcKF5eUe8oDzLwVnn2EnfyD7Lz5z_AI81xkAMxN1Ew&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1150/v-full-1150_Preview.srt?token=BgC1E4VgKY7PVi-dOg8GTMXH_hLldpvlolB0BK8D9-E&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full16","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Immersive Visualization and Visual Analytics","session_uid":"v-full","session_youtube_ff_id":"vngAibFJrlE","session_youtube_ff_link":"https://youtu.be/vngAibFJrlE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ARyWz3510nk&t=0h0m56s","sessions":["Immersive Visualization and Visual Analytics"],"time_stamp":"2024-10-16T12:30:00Z","title":"CompositingVis: Exploring Interaction for Creating Composite Visualizations in Immersive Environments","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1699","abstract":"Room-scale immersive data visualisations provide viewers a wide-scale overview of a large dataset, but to interact precisely with individual data points they typically have to navigate to change their point of view. In traditional screen-based visualisations, focus-and-context techniques allow visualisation users to keep a full dataset in view while making detailed selections. Such techniques have been studied extensively on desktop to allow precise selection within large data sets, but they have not been explored in immersive 3D modalities. In this paper we develop a novel immersive focus-and-context technique based on a \u201cmagic portal\u201d metaphor adapted specifically for data visualisation scenarios. An extendable-hand interaction technique is used to place a portal close to the region of interest. The other end of the portal then opens comfortably within the user's physical reach such that they can reach through to precisely select individual data points. Through a controlled study with 12 participants, we find strong evidence that portals reduce overshoots in selection and overall hand trajectory length, reducing arm and shoulder fatigue compared to ranged interaction without the portal. The portals also enable us to use a robot arm to provide haptic feedback for data within the limited volume of the portal region. In a second study with another 12 participants we found that haptics provided a positive experience (qualitative feedback) but did not significantly reduce fatigue. 
We demonstrate applications for portal-based selection through two use-case scenarios.","accessible_pdf":true,"authors":[{"affiliations":["Monash University, Melbourne, Australia"],"email":"dai.shaozhang@gmail.com","is_corresponding":false,"name":"Shaozhang Dai"},{"affiliations":["Monash University, Melbourne, Australia"],"email":"yi.li5@monash.edu","is_corresponding":false,"name":"Yi Li"},{"affiliations":["The University of British Columbia (Okanagan Campus), Kelowna, Canada"],"email":"barrett.ens@ubc.ca","is_corresponding":false,"name":"Barrett Ens"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"lonni.besancon@gmail.com","is_corresponding":true,"name":"Lonni Besan\u00e7on"},{"affiliations":["Monash University, Melbourne, Australia"],"email":"tgdwyer@gmail.com","is_corresponding":false,"name":"Tim Dwyer"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1699","image_caption":"Magic Portal for data selection. User extends virtual arm to place portal near distant data. Portal opens within reach, allowing easy selection of distant points. Robot arm provides haptic feedback for interactions through the portal.","keywords":["immersive analytics, focus-and-context, remote interaction, portal, haptic feedback"],"open_access_supplemental_link":"https://osf.io/afmwx/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/6c7za?view_only=","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/ARyWz3510nk&t=0h38m35s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1699/v-full-1699_Preview.mp4?token=wT8OvZyLiVvdKlORk7nzC3ZNKxJnACWjffJwJAYdzQU&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1699/v-full-1699_Preview.srt?token=qiiiLObmOmzdMBa6T5PyiZm9r3DRn9EQAmrxulLxlZs&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full16","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Immersive Visualization and Visual Analytics","session_uid":"v-full","session_youtube_ff_id":"hJ1I_66AuK0","session_youtube_ff_link":"https://youtu.be/hJ1I_66AuK0","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ARyWz3510nk&t=0h38m35s","sessions":["Immersive Visualization and Visual Analytics"],"time_stamp":"2024-10-16T13:06:00Z","title":"Precise Embodied Data Selection in Room-scale Visualisations While Retaining View Context","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233299602","abstract":"Data transformation is an essential step in data science. While experts primarily use programming to transform their data, there is an increasing need to support non-programmers with user interface-based tools. With the rapid development in interaction techniques and computing environments, we report our empirical findings about the effects of interaction techniques and environments on performing data transformation tasks. Specifically, we studied the potential benefits of direct interaction and virtual reality (VR) for data transformation. We compared gesture interaction versus a standard WIMP user interface, each on the desktop and in VR. 
With the tested data and tasks, we found time performance was similar between desktop and VR. Meanwhile, VR demonstrates preliminary evidence to better support provenance and sense-making throughout the data transformation process. Our exploration of performing data transformation in VR also provides initial affirmation for enabling an iterative and fully immersive data science workflow.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Sungwon In"},{"affiliations":"","email":"","is_corresponding":false,"name":"Tica Lin"},{"affiliations":"","email":"","is_corresponding":false,"name":"Chris North"},{"affiliations":"","email":"","is_corresponding":false,"name":"Hanspeter Pfister"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yalong Yang"}],"award":"","doi":"10.1109/TVCG.2023.3299602","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233299602","image_caption":"Four conditions designed for performing data transformation in the user study, including a combination of desktop or VR environments, and WIMP or gesture interactions.","keywords":["Immersive Analytics, Data Transformation, Data Science, Interaction, Empirical Study, Virtual/Augmented/Mixed Reality"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2309.12168","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/ARyWz3510nk&t=0h14m24s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233299602/v-tvcg-20233299602_Preview.mp4?token=YwrZJyFLLJUsc8qH6NZ-PXjTAwC5-rRg_7eZLTrqQbQ&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full16","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Immersive Visualization and Visual Analytics","session_uid":"v-tvcg","session_youtube_ff_id":"x7MhNW0QKSo","session_youtube_ff_link":"https://youtu.be/x7MhNW0QKSo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ARyWz3510nk&t=0h14m24s","sessions":["Immersive Visualization and Visual Analytics"],"time_stamp":"2024-10-16T12:42:00Z","title":"This is the Table I Want! Interactive Data Transformation on Desktop and in Virtual Reality","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233322898","abstract":"Visual and interactive machine learning systems (IML) are becoming ubiquitous as they empower individuals with varied machine learning expertise to analyze data. However, it remains complex to align interactions with visual marks to a user\u2019s intent for steering machine learning models. We explore using data and visual design probes to elicit users\u2019 desired interactions to steer ML models via visual encodings within IML interfaces. We conducted an elicitation study with 20 data analysts with varying expertise in ML. We summarize our findings as pairs of target-interaction, which we compare to prior systems to assess the utility of the probes. We additionally surfaced insights about factors influencing how and why participants chose to interact with visual encodings, including refraining from interacting. 
Finally, we reflect on the value of gathering such formative empirical evidence via data and visual design probes ahead of developing IML prototypes. ","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Anamaria Crisan"},{"affiliations":"","email":"","is_corresponding":false,"name":"Maddie Shang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Eric Brochu"}],"award":"","doi":"10.1109/TVCG.2023.3322898","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"v-tvcg-20233322898","image_caption":"","keywords":["Design Probes, Interactive Machine Learning, Model Steering, Semantic Interaction"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/ARyWz3510nk&t=1h4m15s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full16","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Immersive Visualization and Visual Analytics","session_uid":"v-tvcg","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ARyWz3510nk&t=1h4m15s","sessions":["Immersive Visualization and Visual Analytics"],"time_stamp":"2024-10-16T13:30:00Z","title":"Eliciting Model Steering Interactions from Users via Data and Visual Design Probes","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233334513","abstract":"Data integration is often performed to consolidate information from multiple disparate data sources during visual data analysis. However, integration operations are usually separate from visual analytics operations such as encode and filter in both interface design and empirical research. We conducted a preliminary user study to investigate whether and how data integration should be incorporated directly into the visual analytics process. We used two interface alternatives featuring contrasting approaches to the data preparation and analysis workflow: manual file-based ex-situ integration as a separate step from visual analytics operations; and automatic UI-based in-situ integration merged with visual analytics operations. Participants were asked to complete specific and free-form tasks with each interface, browsing for patterns, generating insights, and summarizing relationships between attributes distributed across multiple files. Analyzing participants' interactions and feedback, we found both task completion time and total interactions to be similar across interfaces and tasks, as well as unique integration strategies between interfaces and emergent behaviors related to satisficing and cognitive bias. Participants' time spent and interactions revealed that in-situ integration enabled users to spend more time on analysis tasks compared with ex-situ integration. Participants' integration strategies and analytical behaviors revealed differences in interface usage for generating and tracking hypotheses and insights. 
With these results, we synthesized preliminary guidelines for designing future visual analytics interfaces that can support integrating attributes throughout an active analysis process.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Adam Coscia"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ashley Suh"},{"affiliations":"","email":"","is_corresponding":false,"name":"Remco Chang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Alex Endert"}],"award":"","doi":"10.1109/TVCG.2023.3334513","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233334513","image_caption":"We studied differences in sensemaking during visual data analysis between manually integrating data with Excel versus automatic integration built-in to a visual analytics interface. We discovered unique analysis strategies with automatic integration, as well as negative effects on tracking insights, satisficing and biased behaviors. We contribute open questions and design guidelines for building future tools that integrate data throughout the visual analytics process. Our data, analysis, and results are all open-source and available at: https://github.com/AdamCoscia/Integration-Guidelines-VA. To read about them, check out our paper!","keywords":["Visual analytics, Data integration, User interface design, Integration strategies, Analytical behaviors."],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2403.04757","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/ARyWz3510nk&t=0h51m10s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233334513/v-tvcg-20233334513_Preview.mp4?token=Hl9uNjL8zDZnP5-6I9K85S2LiboQcD3u9kavKSJC9JY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233334513/v-tvcg-20233334513_Preview.srt?token=X4-7kksvv3BwCLt9AwOuiXQ1n9hNxJx4AI3eTJky3Mg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full16","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Immersive Visualization and Visual Analytics","session_uid":"v-tvcg","session_youtube_ff_id":"8EFRrhaq9Bg","session_youtube_ff_link":"https://youtu.be/8EFRrhaq9Bg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ARyWz3510nk&t=0h51m10s","sessions":["Immersive Visualization and Visual Analytics"],"time_stamp":"2024-10-16T13:18:00Z","title":"Preliminary Guidelines For Combining Data Integration and Visual Data Analysis","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233340770","abstract":"We present VoxAR, a method to facilitate an effective visualization of volume-rendered objects in optical see-through head-mounted displays (OST-HMDs). The potential of augmented reality (AR) to integrate digital information into the physical world provides new opportunities for visualizing and interpreting scientific data. However, a limitation of OST-HMD technology is that rendered pixels of a virtual object can interfere with the colors of the real-world, making it challenging to perceive the augmented virtual information accurately. We address this challenge in a two-step approach. 
First, VoxAR determines an appropriate placement of the volume-rendered object in the real-world scene by evaluating a set of spatial and environmental objectives, managed as user-selected preferences and pre-defined constraints. We achieve a real-time solution by implementing the objectives using a GPU shader language.Next, VoxAR adjusts the colors of the input transfer function (TF) based on the real-world placement region. Specifically, we introduce a novel optimization method that adjusts the TF colors such that the resulting volume-rendered pixels are discernible against the background and the TF maintains the perceptual mapping between the colors and data intensity values. Finally, we present an assessment of our approach through objective evaluations and subjective user studies.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Saeed Boorboor"},{"affiliations":"","email":"","is_corresponding":false,"name":"Matthew S. Castellana"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yoonsang Kim"},{"affiliations":"","email":"","is_corresponding":false,"name":"Zhutian Chen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Johanna Beyer"},{"affiliations":"","email":"","is_corresponding":false,"name":"Hanspeter Pfister"},{"affiliations":"","email":"","is_corresponding":false,"name":"Arie E. Kaufman"}],"award":"","doi":"10.1109/TVCG.2023.3340770","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233340770","image_caption":"For visualizing a volume-rendered virtual object, in a real-world scene, using an OST-HMD, our framework, VoxAR determines its meaningful placement and, accordingly, adjusts its transfer function (TF) to enhance visibility. 
A side-by-side comparison is shown of how the data volume rendered with the adjusted TF effectively improves visibility in OST-AR, when augmented in a spatial location determined by VoxAR.","keywords":["Adaptive Visualization, Situated Visualization, Augmented Reality, Volume Rendering"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/ARyWz3510nk&t=0h26m57s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233340770/v-tvcg-20233340770_Preview.mp4?token=KXyqwpdkPuPiW1t4Calqu3RvpHF-1AiM5Pl7UjrXVS4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233340770/v-tvcg-20233340770_Preview.srt?token=BdToBMKduiDB8VILXtDqGW7oNa1yOZiUolvNrOb96Aw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full16","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Immersive Visualization and Visual Analytics","session_uid":"v-tvcg","session_youtube_ff_id":"K3ozRzBvwBw","session_youtube_ff_link":"https://youtu.be/K3ozRzBvwBw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ARyWz3510nk&t=0h26m57s","sessions":["Immersive Visualization and Visual Analytics"],"time_stamp":"2024-10-16T12:54:00Z","title":"VoxAR: Adaptive Visualization of Volume Rendered Objects in Optical See-Through Augmented Reality","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1391","abstract":"In volume visualization, visualization synthesis has attracted much attention due to its ability to generate novel visualizations without following the conventional rendering pipeline. However, existing solutions based on generative adversarial networks often require many training images and take significant training time. Still, issues such as low quality, consistency, and flexibility persist. This paper introduces StyleRF-VolVis, an innovative style transfer framework for expressive volume visualization (VolVis) via neural radiance field (NeRF). The expressiveness of StyleRF-VolVis is upheld by its ability to accurately separate the underlying scene geometry (i.e., content) and color appearance (i.e., style), conveniently modify color, opacity, and lighting of the original rendering while maintaining visual content consistency across the views, and effectively transfer arbitrary styles from reference images to the reconstructed 3D scene. To achieve these, we design a base NeRF model for scene geometry extraction, a palette color network to classify regions of the radiance field for photorealistic editing, and an unrestricted color network to lift the color palette constraint via knowledge distillation for non-photorealistic editing. 
We demonstrate the superior quality, consistency, and flexibility of StyleRF-VolVis by experimenting with various volume rendering scenes and reference images and comparing StyleRF-VolVis against other image-based (AdaIN), video-based (ReReVST), and NeRF-based (ARF and SNeRF) style rendering solutions.","accessible_pdf":false,"authors":[{"affiliations":["University of Notre Dame, Notre Dame, United States"],"email":"ktang2@nd.edu","is_corresponding":true,"name":"Kaiyuan Tang"},{"affiliations":["University of Notre Dame, Notre Dame, United States"],"email":"chaoli.wang@nd.edu","is_corresponding":false,"name":"Chaoli Wang"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1391","image_caption":"StyleRF-VolVis is an innovative style transfer framework based on the neural radiance field for expressive volume visualization. This framework contains three components: a base NeRF model for ensuring accurate geometry reconstruction, a palette color network to support photorealistic style editing, and an unrestricted color network to achieve non-photorealistic style editing. ","keywords":["Style transfer, neural radiance field, knowledge distillation, volume visualization"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.00150","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/-VxwRlkinOQ&t=0h25m50s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1391/v-full-1391_Preview.mp4?token=ITGmgaQ31HqudY__3ys3icjyOy8ieyvto0Z2lDmm_8A&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full17","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Machine Learning for Visualization","session_uid":"v-full","session_youtube_ff_id":"TTUmK5WKV_w","session_youtube_ff_link":"https://youtu.be/TTUmK5WKV_w","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-VxwRlkinOQ&t=0h25m50s","sessions":["Machine Learning for Visualization"],"time_stamp":"2024-10-16T12:54:00Z","title":"StyleRF-VolVis: Style Transfer of Neural Radiance Fields for Expressive Volume Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1427","abstract":"Numerical simulation serves as a cornerstone in scientific modeling, yet the process of fine-tuning simulation parameters poses significant challenges. Conventionally, parameter adjustment relies on extensive numerical simulations, data analysis, and expert insights, resulting in substantial computational costs and low efficiency. The emergence of deep learning in recent years has provided promising avenues for more efficient exploration of parameter spaces. However, existing approaches often lack intuitive methods for precise parameter adjustment and optimization. To tackle these challenges, we introduce ParamsDrag, a model that facilitates parameter space exploration through direct interaction with visualizations. Inspired by DragGAN, our ParamsDrag model operates in three steps. First, the generative component of ParamsDrag generates visualizations based on the input simulation parameters. 
Second, by directly dragging structure-related features in the visualizations, users can intuitively understand the controlling effect of different parameters. Third, with the understanding from the earlier step, users can steer ParamsDrag to produce dynamic visual outcomes. Through experiments conducted on real-world simulations and comparisons with state-of-the-art deep learning-based approaches, we demonstrate the efficacy of our solution.","accessible_pdf":true,"authors":[{"affiliations":["Computer Network Information Center, Chinese Academy of Sciences"],"email":"liguan@sccas.cn","is_corresponding":true,"name":"Guan Li"},{"affiliations":["Beijing Forestry University"],"email":"leo_edumail@163.com","is_corresponding":false,"name":"Yang Liu"},{"affiliations":["Computer Network Information Center, Chinese Academy of Sciences"],"email":"sgh@sccas.cn","is_corresponding":false,"name":"Guihua Shan"},{"affiliations":["Chinese Academy of Sciences"],"email":"chengshiyu@cnic.cn","is_corresponding":false,"name":"Shiyu Cheng"},{"affiliations":["Beijing Forestry University"],"email":"weiqun.cao@126.com","is_corresponding":false,"name":"Weiqun Cao"},{"affiliations":["Visa Research"],"email":"junpeng.wang.nk@gmail.com","is_corresponding":false,"name":"Junpeng Wang"},{"affiliations":["National Taiwan Normal University"],"email":"caseywang777@gmail.com","is_corresponding":false,"name":"Ko-Chih Wang"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1427","image_caption":"ParamsDrag is a surrogate model developed to enhance the exploration of parameter spaces through direct interaction with visualizations. It allows scientists to intuitively manipulate a feature of interest by dragging it to a desired location within a visualization, subsequently generating the corresponding image. 
Additionally, ParamsDrag can retrieve the simulation parameters that led to the generation of the selected image, thereby streamlining the process of parameter identification and adjustment.","keywords":["parameter exploration, feature interaction, parameter inversion"],"open_access_supplemental_link":"https://github.com/YangL-04-20/ParamsDrag","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.14100","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/-VxwRlkinOQ&t=0h38m22s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1427/v-full-1427_Preview.mp4?token=QuVb7a5FWGuhagCSddIouToS9sigElvTzOsQLahtvBE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1427/v-full-1427_Preview.srt?token=T679EKpxR4eqH0-ZThnkpZDa5UXR34a61w341c6xiPk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full17","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Machine Learning for Visualization","session_uid":"v-full","session_youtube_ff_id":"qD2sZpl6UHU","session_youtube_ff_link":"https://youtu.be/qD2sZpl6UHU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-VxwRlkinOQ&t=0h38m22s","sessions":["Machine Learning for Visualization"],"time_stamp":"2024-10-16T13:06:00Z","title":"ParamsDrag: Interactive Parameter Space Exploration via Image-Space Dragging","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1599","abstract":"Existing deep learning-based surrogate models facilitate efficient data generation, but fall short in uncertainty quantification, efficient parameter space exploration, and reverse prediction. In our work, we introduce SurroFlow, a novel normalizing flow-based surrogate model, to learn the invertible transformation between simulation parameters and simulation outputs. The model not only allows accurate predictions of simulation outcomes for a given simulation parameter but also supports uncertainty quantification in the data generation process. Additionally, it enables efficient simulation parameter recommendation and exploration. We integrate SurroFlow and a genetic algorithm as the backend of a visual interface to support effective user-guided ensemble simulation exploration and visualization. 
Our framework significantly reduces the computational costs while enhancing the reliability and exploration capabilities of scientific surrogate models.","accessible_pdf":false,"authors":[{"affiliations":["The Ohio State University, Columbus, United States","The Ohio State University, Columbus, United States"],"email":"shen.1250@osu.edu","is_corresponding":false,"name":"JINGYI SHEN"},{"affiliations":["The Ohio State University, Columbus, United States","The Ohio State University, Columbus, United States"],"email":"duan.418@osu.edu","is_corresponding":true,"name":"Yuhan Duan"},{"affiliations":["The Ohio State University , Columbus , United States","The Ohio State University , Columbus , United States"],"email":"hwshen@cse.ohio-state.edu","is_corresponding":false,"name":"Han-Wei Shen"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1599","image_caption":"In our work, we introduce SurroFlow, a novel normalizing flow-based surrogate model, to learn the invertible transformation between simulation parameters and simulation outputs. The model not only allows accurate predictions of simulation outcomes for a given simulation parameter but also supports uncertainty quantification in the data generation process. Additionally, it enables reverse prediction of simulation parameters of a given simulation data. We integrate SurroFlow and a genetic algorithm as the backend of a visual interface to support effective user-guided ensemble simulation exploration and visualization. ","keywords":["Surrogate model, normalizing flow, uncertainty quantification, parameter space exploration"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.12884","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/-VxwRlkinOQ&t=0h50m2s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1599/v-full-1599_Preview.mp4?token=9J85l_VJt9xibns9hdNhObpUAnkxe-sjC9TK-EcqYqE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1599/v-full-1599_Preview.srt?token=2DOw2qRFLbhWNv0ay10L3whVIBwUNO5pk8QCy7n7q1g&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full17","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Machine Learning for Visualization","session_uid":"v-full","session_youtube_ff_id":"htK9ytzwcDM","session_youtube_ff_link":"https://youtu.be/htK9ytzwcDM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-VxwRlkinOQ&t=0h50m2s","sessions":["Machine Learning for Visualization"],"time_stamp":"2024-10-16T13:18:00Z","title":"SurroFlow: A Flow-Based Surrogate Model for Parameter Space Exploration and Uncertainty Quantification","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1866","abstract":"Feature grid Scene Representation Networks (SRNs) have been applied to scientific data as compact functional surrogates for analysis and visualization. As SRNs are black-box lossy data representations, assessing the prediction quality is critical for scientific visualization applications to ensure that scientists can trust the information being visualized. 
Currently, existing architectures do not support inference time reconstruction quality assessment, as coordinate-level errors cannot be evaluated in the absence of ground truth data. By employing the uncertain neural network architecture in feature grid SRNs, we obtain prediction variances during inference time to facilitate confidence-aware data reconstruction. Specifically, we propose a parameter-efficient multi-decoder SRN (MDSRN) architecture consisting of a shared feature grid with multiple lightweight multi-layer perceptron decoders. MDSRN can generate a set of plausible predictions for a given input coordinate to compute the mean as the prediction of the multi-decoder ensemble and the variance as a confidence score. The coordinate-level variance can be rendered along with the data to inform the reconstruction quality, or be integrated into uncertainty-aware volume visualization algorithms. To prevent the misalignment between the quantified variance and the prediction quality, we propose a novel variance regularization loss for ensemble learning that promotes the Regularized multi-decoder SRN (RMDSRN) to obtain a more reliable variance that correlates closely to the true model error. We comprehensively evaluate the quality of variance quantification and data reconstruction of Monte Carlo Dropout (MCD), Mean Field Variational Inference (MFVI), Deep Ensemble (DE), and Predicting Variance (PV) in comparison with our proposed MDSRN and RMDSRN applied to state-of-the-art feature grid SRNs across diverse scalar field datasets. We demonstrate that RMDSRN attains the most accurate data reconstruction and competitive variance-error correlation among uncertain SRNs under the same neural network parameter budgets. Furthermore, we present an adaptation of uncertainty-aware volume rendering and shed light on the potential of incorporating uncertain predictions in improving the quality of volume rendering for uncertain SRNs. Through ablation studies on the regularization strength and decoder count, we show that MDSRN and RMDSRN are expected to perform sufficiently well with a default configuration without requiring customized hyperparameter settings for different datasets.","accessible_pdf":false,"authors":[{"affiliations":["The Ohio State University, Columbus, United States"],"email":"xiong.336@osu.edu","is_corresponding":true,"name":"Tianyu Xiong"},{"affiliations":["The Ohio State University, Columbus, United States"],"email":"wurster.18@osu.edu","is_corresponding":false,"name":"Skylar Wolfgang Wurster"},{"affiliations":["The Ohio State University, Columbus, United States","Argonne National Laboratory, Lemont, United States"],"email":"guo.2154@osu.edu","is_corresponding":false,"name":"Hanqi Guo"},{"affiliations":["Argonne National Laboratory, Lemont, United States"],"email":"tpeterka@mcs.anl.gov","is_corresponding":false,"name":"Tom Peterka"},{"affiliations":["The Ohio State University , Columbus , United States"],"email":"hwshen@cse.ohio-state.edu","is_corresponding":false,"name":"Han-Wei Shen"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1866","image_caption":"By training multiple lightweight decoders and combining a variance regularization in the loss function, regularized multi-decoder SRN (RMDSRN) enables any feature grid SRN to produce uncertain predictions, such that a variance can be computed and visualized for post-training prediction quality assessment. 
Thanks to the variance regularization, the variances are more likely to resemble the spatial patterns of the actual prediction errors, which are inaccessible during inference time.","keywords":["Scene representation network, deep learning, scientific visualization, ensemble learning"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/-VxwRlkinOQ&t=1h3m1s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1866/v-full-1866_Preview.mp4?token=JvYFoDniHtLPq0JNoRUiG6oNdGm3_ZCRaIqpCOhs9yY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1866/v-full-1866_Preview.srt?token=7-modfywyiOsocXo-RX1xgkeCSva1HmuLauuXK2XFW0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full17","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Machine Learning for Visualization","session_uid":"v-full","session_youtube_ff_id":"Kx3B9acBnOw","session_youtube_ff_link":"https://youtu.be/Kx3B9acBnOw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-VxwRlkinOQ&t=1h3m1s","sessions":["Machine Learning for Visualization"],"time_stamp":"2024-10-16T13:30:00Z","title":"Regularized Multi-Decoder Ensemble for an Error-Aware Scene Representation Network","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233345373","abstract":"Traditional deep learning algorithms assume that all data is available during training, which presents challenges when handling large-scale time-varying data. To address this issue, we propose a data reduction pipeline called knowledge distillation-based implicit neural representation (KD-INR) for compressing large-scale time-varying data. The approach consists of two stages: spatial compression and model aggregation. In the first stage, each time step is compressed using an implicit neural representation with bottleneck layers and features of interest preservation-based sampling. In the second stage, we utilize an offline knowledge distillation algorithm to extract knowledge from the trained models and aggregate it into a single model. We evaluated our approach on a variety of time-varying volumetric data sets. 
Both quantitative and qualitative results, such as PSNR, LPIPS, and rendered images, demonstrate that KD-INR surpasses the state-of-the-art approaches, including learning-based (i.e., CoordNet, NeurComp, and SIREN) and lossy compression (i.e., SZ3, ZFP, and TTHRESH) methods, at various compression ratios ranging from hundreds to ten thousand.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Jun Han"},{"affiliations":"","email":"","is_corresponding":false,"name":"Hao Zheng"},{"affiliations":"","email":"","is_corresponding":false,"name":"Change Bi"}],"award":"","doi":"10.1109/TVCG.2023.3345373","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233345373","image_caption":"We propose KD-INR, a knowledge distillation-based implicit neural representation, enabling to sequentially compress time-varying data with memory effciency.","keywords":["Time-varying data compression, implicit neural representation, knowledge distillation, volume visualization."],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/-VxwRlkinOQ&t=0h0m38s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233345373/v-tvcg-20233345373_Preview.mp4?token=G8qsRJ43TvtKl9ixMm-0EzulO0vYdyrc45e6mkBK4Kk&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233345373/v-tvcg-20233345373_Preview.srt?token=TxuFh0QqKDQp8HO0I4LFp4rEB6p1OeuIylrsWpoU994&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full17","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Machine Learning for Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"wPUZtAngZUk","session_youtube_ff_link":"https://youtu.be/wPUZtAngZUk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-VxwRlkinOQ&t=0h0m38s","sessions":["Machine Learning for Visualization"],"time_stamp":"2024-10-16T12:30:00Z","title":"KD-INR: Time-Varying Volumetric Data Compression via Knowledge Distillation-based Implicit Neural Representation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243365089","abstract":"Implicit Neural representations (INRs) are widely used for scientific data reduction and visualization by modeling the function that maps a spatial location to a data value. Without any prior knowledge about the spatial distribution of values, we are forced to sample densely from INRs to perform visualization tasks like iso-surface extraction which can be very computationally expensive. Recently, range analysis has shown promising results in improving the efficiency of geometric queries, such as ray casting and hierarchical mesh extraction, on INRs for 3D geometries by using arithmetic rules to bound the output range of the network within a spatial region. However, the analysis bounds are often too conservative for complex scientific data. In this paper, we present an improved technique for range analysis by revisiting the arithmetic rules and analyzing the probability distribution of the network output within a spatial region. We model this distribution efficiently as a Gaussian distribution by applying the central limit theorem. 
Excluding low probability values, we are able to tighten the output bounds, resulting in a more accurate estimation of the value range, and hence more accurate identification of iso-surface cells and more efficient iso-surface extraction on INRs. Our approach demonstrates superior performance in terms of the iso-surface extraction time on four datasets compared to the original range analysis method and can also be generalized to other geometric query tasks.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Haoyu Li"},{"affiliations":"","email":"","is_corresponding":false,"name":"Han-Wei Shen"}],"award":"","doi":"10.1109/TVCG.2024.3365089","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243365089","image_caption":"This image shows the iso-surface extraction results comparison between our approach on the right and the traditional approach on the left. We can only observe minor differences between them. The statistics of the missed iso-surface components also suggest our method preserves the accuracy while being much more efficient than the traditional iso-surface extraction method.","keywords":["Iso-surface extraction, implicit neural representation, uncertainty propagation, affine arithmetic."],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2402.13861","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/-VxwRlkinOQ&t=0h14m11s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243365089/v-tvcg-20243365089_Preview.mp4?token=YSsQqjdhLKylaN5IeHQC47JZTglIl1_TAOSsSfKkdvc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243365089/v-tvcg-20243365089_Preview.srt?token=phWo3Qr_Bff0OgzJr7-XKTgDhv37s7bU3np7TbOOGmg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full17","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Machine Learning for Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"UoEnrW69xCE","session_youtube_ff_link":"https://youtu.be/UoEnrW69xCE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/-VxwRlkinOQ&t=0h14m11s","sessions":["Machine Learning for Visualization"],"time_stamp":"2024-10-16T12:42:00Z","title":"Improving Efficiency of Iso-Surface Extraction on Implicit Neural Representations Using Uncertainty Propagation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1185","abstract":"This paper presents an interactive technique to explain visual patterns in network visualizations to analysts who do not understand these visualizations and who are learning to read them. Learning a visualization requires mastering its visual grammar and decoding information presented through visual marks, graphical encodings, and spatial configurations. To help people learn network visualization designs and extract meaningful information, we introduce the concept of interactive pattern explanation that allows viewers to select an arbitrary area in a visualization, then automatically mines the underlying data patterns, and explains both visual and data patterns present in the viewer\u2019s selection. 
In a qualitative and a quantitative user study with a total of 32 participants, we compare interactive pattern explanations to textual-only and visual-only (cheatsheets) explanations. Our results show that interactive explanations increase learning of i) unfamiliar visualizations, ii) patterns in network science, and iii) the respective network terminology.","accessible_pdf":false,"authors":[{"affiliations":["Newcastle University, Newcastle Upon Tyne, United Kingdom","University of Edinburgh, Edinburgh, United Kingdom"],"email":"xinhuan.shu@gmail.com","is_corresponding":true,"name":"Xinhuan Shu"},{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"alexis.pister@hotmail.com","is_corresponding":false,"name":"Alexis Pister"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"tangjunxiu@zju.edu.cn","is_corresponding":false,"name":"Junxiu Tang"},{"affiliations":["University of Toronto, Toronto, Canada"],"email":"fanny@dgp.toronto.edu","is_corresponding":false,"name":"Fanny Chevalier"},{"affiliations":["Inria, Bordeaux, France","University of Edinburgh, Edinburgh, United Kingdom"],"email":"bbach@inf.ed.ac.uk","is_corresponding":false,"name":"Benjamin Bach"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1185","image_caption":"We propose Pattern Explainer to help analysts who are unfamiliar with network visualizations learn about visual patterns in the representation of their data. Looking at the visualization, a user spots a visual pattern of interest, e.g. a \u201cbug\u201d-looking pattern in the matrix. To inquire about whether this pattern is meaningful, the user selects the area. Pattern Explainer then automatically mines the selection, against a dictionary of network motifs, and provides the user with explanations of what underlying network patterns the visual pattern reveals.","keywords":["Visualization education, network visualization"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/pdf/2408.01272","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/uw_DXYjpu24&t=0h26m22s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1185/v-full-1185_Preview.mp4?token=Cgmabne65gfCNXK-NMIeiuk8fYX4EieVqafAEjZboQ8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1185/v-full-1185_Preview.srt?token=QBN_50P54Pb6D33gG6W1ws6A2HHVjLAE4HfhX4NEALs&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full18","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Where the Networks Are","session_uid":"v-full","session_youtube_ff_id":"XYAcTewN_E8","session_youtube_ff_link":"https://youtu.be/XYAcTewN_E8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/uw_DXYjpu24&t=0h26m22s","sessions":["Where the Networks Are"],"time_stamp":"2024-10-18T12:54:00Z","title":"Does This Have a Particular Meaning?: Interactive Pattern Explanation for Network Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1606","abstract":"With the increase of graph size, it becomes difficult or even impossible to visualize graph structures clearly within the limited screen space. 
Consequently, it is crucial to design effective visual representations for large graphs. In this paper, we propose AdaMotif, a novel approach that can capture the essential structure patterns of large graphs and effectively reveal the overall structures via adaptive motif designs. Specifically, our approach involves partitioning a given large graph into multiple subgraphs, then clustering similar subgraphs and extracting similar structural information within each cluster. Subsequently, adaptive motifs representing each cluster are generated and utilized to replace the corresponding subgraphs, leading to a simplified visualization. Our approach aims to preserve as much information as possible from the subgraphs while simplifying the graph efficiently. Notably, our approach successfully visualizes crucial community information within a large graph. We conduct case studies and a user study using real-world graphs to validate the effectiveness of our proposed approach. The results demonstrate the capability of our approach in simplifying graphs while retaining important structural and community information.","accessible_pdf":false,"authors":[{"affiliations":["Shenzhen University, Shenzhen, China"],"email":"hzhou@szu.edu.cn","is_corresponding":true,"name":"Hong Zhou"},{"affiliations":["Shenzhen University, Shenzhen, China"],"email":"laipeifeng1111@gmail.com","is_corresponding":false,"name":"Peifeng Lai"},{"affiliations":["Shenzhen University, Shenzhen, China"],"email":"zhida.sun@connect.ust.hk","is_corresponding":false,"name":"Zhida Sun"},{"affiliations":["Shenzhen University, Shenzhen, China"],"email":"2310274034@email.szu.edu.cn","is_corresponding":false,"name":"Xiangyuan Chen"},{"affiliations":["Shenzhen University, Shen Zhen, China"],"email":"275621136@qq.com","is_corresponding":false,"name":"Yang Chen"},{"affiliations":["Shenzhen University, Shenzhen, China"],"email":"hswu@szu.edu.cn","is_corresponding":false,"name":"Huisi Wu"},{"affiliations":["Nanyang Technological University, Singapore, Singapore"],"email":"yong-wang@ntu.edu.sg","is_corresponding":false,"name":"Yong WANG"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1606","image_caption":"Case analysis of the Cpan dataset: (a) the original graph; (b) our AdaMotif. The highlighted areas of each subfigure show the enlarged communities. We highlight identical communities for comparison. The identical communities are marked using \"The same community\". In (a), to make communities easier to identify, their nodes and edges are highlighted in blue and red, respectively. In (b), motifs with the same color and similar shape represent similar communities. The size of the motif indicates the number of nodes in this community. 
Our result provides a clearer expression of community information.","keywords":["Graph visualization, node-link diagrams, graph simplification"],"open_access_supplemental_link":"https://osf.io/pb8t3/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/uw_DXYjpu24&t=0h51m6s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1606/v-full-1606_Preview.mp4?token=XNAI--kxzKJbfnEWfEQn-z-dQs-XeCUps7OttO-Ri_8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1606/v-full-1606_Preview.srt?token=yyU1vwaX2Yxq1I7tJ61fvdARcrkp7cxtk-SnmGbGUKw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full18","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Where the Networks Are","session_uid":"v-full","session_youtube_ff_id":"gWWaEplNEMQ","session_youtube_ff_link":"https://youtu.be/gWWaEplNEMQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/uw_DXYjpu24&t=0h51m6s","sessions":["Where the Networks Are"],"time_stamp":"2024-10-18T13:18:00Z","title":"AdaMotif: Graph Simplification via Adaptive Motif Design","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1693","abstract":"We introduce a visual analysis method for multiple causal graphs with different outcome variables, namely, multi-outcome causal graphs. Multi-outcome causal graphs are important in healthcare for understanding multimorbidity and comorbidity. To support the visual analysis, we collaborated with medical experts to devise two comparative visualization techniques at different stages of the analysis process. First, a progressive visualization method is proposed for comparing multiple state-of-the-art causal discovery algorithms. The method can handle mixed-type datasets comprising both continuous and categorical variables and assist in the creation of a fine-tuned causal graph of a single outcome. Second, a comparative graph layout technique and specialized visual encodings are devised for the quick comparison of multiple causal graphs. In our visual analysis approach, analysts start by building individual causal graphs for each outcome variable, and then, multi-outcome causal graphs are generated and visualized with our comparative technique for analyzing differences and commonalities of these causal graphs. 
Evaluation includes quantitative measurements on benchmark datasets, a case study with a medical expert, and expert user studies with real-world health research data.","accessible_pdf":true,"authors":[{"affiliations":["Institute of Medical Technology, Peking University Health Science Center, Beijing, China","National Institute of Health Data Science, Peking University, Beijing, China"],"email":"mengjiefan@bjmu.edu.cn","is_corresponding":true,"name":"Mengjie Fan"},{"affiliations":["Chalmers University of Technology, Gothenburg, Sweden","Peking University, Beijing, China"],"email":"yu.jinlu@qq.com","is_corresponding":false,"name":"Jinlu Yu"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"weiskopf@visus.uni-stuttgart.de","is_corresponding":false,"name":"Daniel Weiskopf"},{"affiliations":["Tongji College of Design and Innovation, Shanghai, China"],"email":"nan.cao@gmail.com","is_corresponding":false,"name":"Nan Cao"},{"affiliations":["Beijing University of Chinese Medicine, Beijing, China"],"email":"wanghuaiyuelva@126.com","is_corresponding":false,"name":"Huaiyu Wang"},{"affiliations":["Peking University, Beijing, China"],"email":"zhoulng@pku.edu.cn","is_corresponding":false,"name":"Liang Zhou"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1693","image_caption":"The case study of the UK Biobank data with a medical expert using our method. In the first stage of \"single causal graph analysis\" (1\u20134), the expert explores and edits single causal graphs using the progressive comparative visualization of three state-of-the-art causal discovery techniques (2-4) in combination with her domain knowledge. In the second stage of \"multi-outcome causal graphs comparison\" (5, 6), she selects graphs of interested outcome for comparison using various layouts, including the supergraph (5), and our new comparable layout for subgraphs (6). ","keywords":["Causal graph visualization and visual analysis, causal discovery, comparative visualization, visual analysis in medicine"],"open_access_supplemental_link":"https://github.com/mengjiefan/multi_outcome/tree/vis_rev_sub","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.02679","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/uw_DXYjpu24&t=0h0m57s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1693/v-full-1693_Preview.mp4?token=ovYGwzD9MYXQVpPzgVA8YPTLXY9HO1ombY8hfQ3Uh48&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full18","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Where the Networks Are","session_uid":"v-full","session_youtube_ff_id":"bu5PgW9Q6Kg","session_youtube_ff_link":"https://youtu.be/bu5PgW9Q6Kg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/uw_DXYjpu24&t=0h0m57s","sessions":["Where the Networks Are"],"time_stamp":"2024-10-18T12:30:00Z","title":"Visual Analysis of Multi-outcome Causal Graphs","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1746","abstract":"Hypergraphs provide a natural way to represent polyadic relationships in network data. 
For large hypergraphs, it is often difficult to visually detect structures within the data. Recently, a scalable polygon-based visualization approach was developed allowing hypergraphs with thousands of hyperedges to be simplified and examined at different levels of detail. However, this approach is not guaranteed to eliminate all of the visual clutter caused by unavoidable overlaps. Furthermore, meaningful structures can be lost at simplified scales, making their interpretation unreliable. In this paper, we define hypergraph structures using the bipartite graph representation, allowing us to decompose the hypergraph into a union of structures including topological blocks, bridges, and branches, and to identify exactly where unavoidable overlaps must occur. We also introduce a set of topology preserving and topology altering atomic operations, enabling the preservation of important structures while reducing unavoidable overlaps to improve visual clarity and interpretability in simplified scales. We demonstrate our approach in several real-world applications.","accessible_pdf":false,"authors":[{"affiliations":["Oregon State University, Corvallis, United States"],"email":"oliverpe@oregonstate.edu","is_corresponding":false,"name":"Peter D Oliver"},{"affiliations":["Oregon State University, Corvallis, United States"],"email":"zhange@eecs.oregonstate.edu","is_corresponding":false,"name":"Eugene Zhang"},{"affiliations":["Oregon State University, Corvallis, United States"],"email":"zhangyue@oregonstate.edu","is_corresponding":false,"name":"Yue Zhang"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1746","image_caption":"We present a structure-guided simplification scheme for hypergraphs. Given an input hypergraph (left), we identify a cycle basis for its bipartite graph representation (middle). Using the basis cycles, we decompose the hypergraph into a union of topological blocks (purple bubbles), bridges, and branches (green bubbles). We apply minimal cycle collapse and cycle cut simplifications to eliminate unavoidable overlaps in the topological blocks, and apply leaf pruning simplifications to reduce the space required by bridges and branches. 
Our simplification prioritizes preserving long cycles, bridges, and branches so that the most significant structures are kept in the simplified results (right).","keywords":["Hypergraph Visualization, Hypergraph Simplification, Hypergraph Topology, Bipartite Representation"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.19621","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/uw_DXYjpu24&t=0h14m18s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1746/v-full-1746_Preview.mp4?token=ZRuLy-4QoGhRaO17UYqa07iEi1dYdlexH3G8P14LWxU&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full18","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Where the Networks Are","session_uid":"v-full","session_youtube_ff_id":"kP6irewadAE","session_youtube_ff_link":"https://youtu.be/kP6irewadAE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/uw_DXYjpu24&t=0h14m18s","sessions":["Where the Networks Are"],"time_stamp":"2024-10-18T12:42:00Z","title":"Structure-Aware Simplification for Hypergraph Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1831","abstract":"When using exploratory visual analysis to examine multivariate hierarchical data, users often need to query data to narrow down the scope of analysis. However, formulating effective query expressions remains a challenge for multivariate hierarchical data, particularly when datasets become very large. To address this issue, we develop a declarative grammar,HiRegEx (Hierarchical data Regular Expression), for querying and exploring multivariate hierarchical data. Rooted in the extended multi-level task topology framework for tree visualizations (e-MLTT), HiRegEx delineates three query targets (node, path, and subtree) and two aspects for querying these targets (features and positions), and uses operators developed based on classical regular expressions for query construction. Based on the HiRegEx grammar, we develop an exploratory framework for querying and exploring multivariate hierarchical data and integrate it into the TreeQueryER prototype system. The exploratory framework includes three major components: top-down pattern specification, bottom-up data-driven inquiry, and context-creation data overview. 
We validate the expressiveness of HiRegEx with the tasks from the e-MLTT framework and showcase the utility and effectiveness ofTreeQueryER system through a case study involving expert users in the analysis of a citation tree dataset.","accessible_pdf":false,"authors":[{"affiliations":["Beijing Institute of Technology, Beijing, China"],"email":"guozhg.li@gmail.com","is_corresponding":false,"name":"Guozheng Li"},{"affiliations":["Beijing Institute of Technology, Beijing, China"],"email":"haotian.mi1@gmail.com","is_corresponding":false,"name":"haotian mi"},{"affiliations":["Beijing Institute of Technology, Beijing, China"],"email":"liuchi02@gmail.com","is_corresponding":false,"name":"Chi Harold Liu"},{"affiliations":["Ochanomizu University, Tokyo, Japan"],"email":"itot@is.ocha.ac.jp","is_corresponding":false,"name":"Takayuki Itoh"},{"affiliations":["Beijing Institute of Technology, Beijing, China"],"email":"wanggrbit@126.com","is_corresponding":false,"name":"Guoren Wang"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1831","image_caption":"The exploratory framework for querying multivariate hierarchical data comprises three modes: top-down, bottom-up, and context-creation. The top-down mode starts from a clear query task. Users construct the corresponding query expression through direct manipulations interactively. The bottom-up mode recommends related query expressions based on the initial expression and the multivariate hierarchical data collection. The context-creation mode offers users an overview of the entire hierarchical data collection. Modules associated with the top-down, bottom-up, and context creation modes in the framework are denoted by red, orange, and blue triangles. ","keywords":["Multivariate hierarchical data, declarative grammar, visual query"],"open_access_supplemental_link":"https://github.com/bitvis2021/HiRegEx","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/uw_DXYjpu24&t=1h5m34s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1831/v-full-1831_Preview.mp4?token=n9QVeFNmeHdmGxNyALHyOSelyM2wK96nkpNFEk5JK20&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1831/v-full-1831_Preview.srt?token=ZgipMgJhWQR58B78D58zTFJOIAOOffNjTYK6RJMyoOA&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full18","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Where the Networks Are","session_uid":"v-full","session_youtube_ff_id":"7q67dSgbZCI","session_youtube_ff_link":"https://youtu.be/7q67dSgbZCI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/uw_DXYjpu24&t=1h5m34s","sessions":["Where the Networks Are"],"time_stamp":"2024-10-18T13:30:00Z","title":"HiRegEx: Interactive Visual Query and Exploration of Multivariate Hierarchical Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233306356","abstract":"A multitude of studies have been conducted on graph drawing, but many existing methods only focus on optimizing a single aesthetic aspect of graph layouts. 
There are a few existing methods that attempt to develop a flexible solution for optimizing different aesthetic aspects measured by different aesthetic criteria. Furthermore, thanks to the significant advance in deep learning techniques, several deep learning-based layout methods were proposed recently, which have demonstrated the advantages of the deep learning approaches for graph drawing. However, none of these existing methods can be directly applied to optimizing non-differentiable criteria without special accommodation. In this work, we propose a novel Generative Adversarial Network (GAN) based deep learning framework for graph drawing, called SmartGD, which can optimize any quantitative aesthetic goals even though they are non-differentiable. In the cases where the aesthetic goal is too abstract to be described mathematically, SmartGD can draw graphs in a similar style as a collection of good layout examples, which might be selected by humans based on the abstract aesthetic goal. To demonstrate the effectiveness and efficiency of SmartGD, we conduct experiments on minimizing stress, minimizing edge crossing, maximizing crossing angle, and a combination of multiple aesthetics. Compared with several popular graph drawing algorithms, the experimental results show that SmartGD achieves good performance both quantitatively and qualitatively.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Xiaoqi Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Kevin Yen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yifan Hu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Han-Wei Shen"}],"award":"","doi":"10.1109/TVCG.2023.3306356","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233306356","image_caption":"SmartGD is a novel deep-learning framework for graph drawing, which can optimize any quantitative aesthetics. It is a GAN-based framework in which the generator learns to draw graphs, and the discriminator serves as a judge of the layout quality. Also, we introduce a unique self-challenging mechanism that continuously improves the quality of real layouts during training. 
Feel free to check our paper and code for more details.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/uw_DXYjpu24&t=0h38m53s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233306356/v-tvcg-20233306356_Preview.mp4?token=4FeNN97BbtmlhvsZMB10GJ5NdeysTGNjtZGcEi2MjHk&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233306356/v-tvcg-20233306356_Preview.srt?token=e0TXzPdvcYtaKAftmwaAOpQueUFI2YNUVbmpC3fSPn8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full18","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Where the Networks Are","session_uid":"v-tvcg","session_youtube_ff_id":"o-j0BsCaXoU","session_youtube_ff_link":"https://youtu.be/o-j0BsCaXoU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/uw_DXYjpu24&t=0h38m53s","sessions":["Where the Networks Are"],"time_stamp":"2024-10-18T13:06:00Z","title":"SmartGD: A GAN-Based Graph Drawing Framework for Diverse Aesthetic Goals","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1395","abstract":"Onboarding a user to a visualization dashboard entails explaining its various components, including the chart types used, the data loaded, and the interactions available. Authoring such an onboarding experience is time-consuming and requires significant knowledge and little guidance on how best to complete this task. Depending on their levels of expertise, end users being onboarded to a new dashboard can be either confused and overwhelmed or disinterested and disengaged. We propose interactive dashboard tours (D-Tours) as semi-automated onboarding experiences that preserve the agency of users with various levels of expertise to keep them interested and engaged. Our interactive tours concept draws from open-world game design to give the user freedom in choosing their path through onboarding. We have implemented the concept in a tool called D-TOUR PROTOTYPE, which allows authors to craft custom interactive dashboard tours from scratch or using automatic templates. Automatically generated tours can still be customized to use different media (e.g., video, audio, and highlighting) or new narratives to produce an onboarding experience tailored to an individual user. We demonstrate the usefulness of interactive dashboard tours through use cases and expert interviews. Our evaluation shows that authors found the automation in the D-Tour Prototype helpful and time-saving, and users found the created tours engaging and intuitive. 
This paper and all supplemental materials are available at https://osf.io/6fbjp/.","accessible_pdf":false,"authors":[{"affiliations":["Pro2Future GmbH, Linz, Austria","Johannes Kepler University, Linz, Austria"],"email":"vaishali.dhanoa@pro2future.at","is_corresponding":true,"name":"Vaishali Dhanoa"},{"affiliations":["Johannes Kepler University, Linz, Austria"],"email":"andreas.hinterreiter@jku.at","is_corresponding":false,"name":"Andreas Hinterreiter"},{"affiliations":["Johannes Kepler University, Linz, Austria"],"email":"vanessa.fediuk@jku.at","is_corresponding":false,"name":"Vanessa Fediuk"},{"affiliations":["Aarhus University, Aarhus, Denmark"],"email":"elm@cs.au.dk","is_corresponding":false,"name":"Niklas Elmqvist"},{"affiliations":["Institute of Visual Computing "," Human-Centered Technology, Vienna, Austria"],"email":"groeller@cg.tuwien.ac.at","is_corresponding":false,"name":"Eduard Gr\u00f6ller"},{"affiliations":["Johannes Kepler University Linz, Linz, Austria"],"email":"marc.streit@jku.at","is_corresponding":false,"name":"Marc Streit"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1395","image_caption":"D-Tour Prototype Authoring Mode. Authors pick (a) automatically extracted visualization categories, General, Insight, or Interaction from the Content Extraction View and drag them to the Content Arrangement View, where they (b) arrange them, (b.1) thus crafting a tour and (b.2) adding explanations to the tour content. In the Dissemination View they (c) test changes before disseminating them. A selection of the Column Chart General in the Content Extraction View is shown which is highlighted in the Content Arrangement View and in the Dissemination View. Its associated content can be seen in (b.2)","keywords":["Dashboards, onboarding, storytelling, tutorial, interactive tours, open-world games"],"open_access_supplemental_link":"https://osf.io/6fbjp/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/t5m3u","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/NWNMgWnT7NM&t=0h37m24s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1395/v-full-1395_Preview.mp4?token=V1XLccn7O78RyqyQxzrfDBtx39_DAamtvybdh6UTnM0&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full19","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Recommendation","session_uid":"v-full","session_youtube_ff_id":"S6366DrJQTs","session_youtube_ff_link":"https://youtu.be/S6366DrJQTs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/NWNMgWnT7NM&t=0h37m24s","sessions":["Visualization Recommendation"],"time_stamp":"2024-10-17T13:18:00Z","title":"D-Tour: Semi-Automatic Generation of Interactive Guided Tours for Visualization Dashboard Onboarding","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1416","abstract":"Various data visualization applications such as reverse engineering and interactive authoring require a vocabulary that describes the structure of visualization scenes and the procedure to manipulate them. 
A few scene abstractions have been proposed, but they are restricted to specific applications for a limited set of visualization types. A unified and expressive model of data visualization scenes for different applications has been missing. To fill this gap, we present Manipulable Semantic Components (MSC), a computational representation of data visualization scenes, to support applications in scene understanding and augmentation. MSC consists of two parts: a unified object model describing the structure of a visualization scene in terms of semantic components, and a set of operations to generate and modify the scene components. We demonstrate the benefits of MSC in three applications: visualization authoring, visualization deconstruction and reuse, and animation specification.","accessible_pdf":false,"authors":[{"affiliations":["University of Maryland, College Park, United States"],"email":"leozcliu@umd.edu","is_corresponding":false,"name":"Zhicheng Liu"},{"affiliations":["University of Maryland, College Park, United States"],"email":"cchen24@umd.edu","is_corresponding":true,"name":"Chen Chen"},{"affiliations":["University of Maryland, College Park, United States"],"email":"hookerj100@gmail.com","is_corresponding":false,"name":"John Hooker"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1416","image_caption":"We present Manipulable Semantic Components (MSC), a computational representation of data visualization scenes. MSC consists of two parts: a unified object model describing the structure of a visualization scene, and a set of operations to generate and modify the scene components. We demonstrate the benefits of MSC in three case studies.","keywords":["data visualization, scene abstraction, visualization model"],"open_access_supplemental_link":"https://mascot-vis.github.io/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/NWNMgWnT7NM&t=0h50m1s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1416/v-full-1416_Preview.mp4?token=KRrGLYZSqDhpGaEVGwui2BSZujcpuzBKO2MyQZHZ4OU&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1416/v-full-1416_Preview.srt?token=A1hctGLK3p57jh9I2IfrtOvzUOMN5Es0ScMbfjuDbR4&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full19","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Recommendation","session_uid":"v-full","session_youtube_ff_id":"4IYhlRFnM64","session_youtube_ff_link":"https://youtu.be/4IYhlRFnM64","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/NWNMgWnT7NM&t=0h50m1s","sessions":["Visualization Recommendation"],"time_stamp":"2024-10-17T13:30:00Z","title":"Manipulable Semantic Components: a Computational Representation of Data Visualization Scenes","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1472","abstract":"Trained on vast corpora, Large Language Models (LLMs) have the potential to encode visualization design knowledge and best practices. However, if they fail to do so, they might provide unreliable visualization recommendations. What visualization design preferences, then, have LLMs learned? 
We contribute DracoGPT, a method for extracting, modeling, and assessing visualization design preferences from LLMs. To assess varied tasks, we develop two pipelines--DracoGPT-Rank and DracoGPT-Recommend--to model LLMs prompted to either rank or recommend visual encoding specifications. We use Draco as a shared knowledge base in which to represent LLM design preferences and compare them to best practices from empirical research. We demonstrate that DracoGPT can accurately model the preferences expressed by LLMs, enabling analysis in terms of Draco design constraints. Across a suite of backing LLMs, we find that DracoGPT-Rank and DracoGPT-Recommend moderately agree with each other, but both substantially diverge from guidelines drawn from human subjects experiments. Future work can build on our approach to expand Draco's knowledge base to model a richer set of preferences and to provide a robust and cost-effective stand-in for LLMs.","accessible_pdf":false,"authors":[{"affiliations":["University of Washington, Seattle, United States"],"email":"wwill@cs.washington.edu","is_corresponding":true,"name":"Huichen Will Wang"},{"affiliations":["University of Washington, Seattle, United States"],"email":"mgord@cs.stanford.edu","is_corresponding":false,"name":"Mitchell L. Gordon"},{"affiliations":["University of Washington, Seattle, United States"],"email":"leibatt@cs.washington.edu","is_corresponding":false,"name":"Leilani Battle"},{"affiliations":["University of Washington, Seattle, United States"],"email":"jheer@uw.edu","is_corresponding":false,"name":"Jeffrey Heer"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1472","image_caption":"DracoGPT is a method for extracting, modeling, and assessing visualization design preferences from LLMs. We develop two pipelines--DracoGPT-Rank and DracoGPT-Recommend--to model LLMs prompted to either rank or recommend visual encoding specifications. We use Draco as a shared knowledge base in which to represent LLM design preferences and compare them to best practices from empirical research. 
The image shown summarizes the pipeline for DracoGPT-Rank.","keywords":["Visualization, Large Language Models, Visualization Recommendation, Graphical Perception"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/NWNMgWnT7NM&t=0h13m11s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1472/v-full-1472_Preview.mp4?token=kwZ9vXZXTmgiZroZv-H25sDIQX0qQpBLBTsHkJ0-Xw8&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full19","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Recommendation","session_uid":"v-full","session_youtube_ff_id":"Y-lg3iu3-o4","session_youtube_ff_link":"https://youtu.be/Y-lg3iu3-o4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/NWNMgWnT7NM&t=0h13m11s","sessions":["Visualization Recommendation"],"time_stamp":"2024-10-17T12:42:00Z","title":"DracoGPT: Extracting Visualization Design Preferences from Large Language Models","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233316469","abstract":"Automated visualization recommendation facilitates the rapid creation of effective visualizations, which is especially beneficial for users with limited time and limited knowledge of data visualization. There is an increasing trend in leveraging machine learning (ML) techniques to achieve an end-to-end visualization recommendation. However, existing ML-based approaches implicitly assume that there is only one appropriate visualization for a specific dataset, which is often not true for real applications. Also, they often work like a black box, and are difficult for users to understand the reasons for recommending specific visualizations. To fill the research gap, we propose AdaVis, an adaptive and explainable approach to recommend one or multiple appropriate visualizations for a tabular dataset. It leverages a box embedding-based knowledge graph to well model the possible one-to-many mapping relations among different entities (i.e., data features, dataset columns, datasets, and visualization choices). The embeddings of the entities and relations can be learned from dataset-visualization pairs. Also, AdaVis incorporates the attention mechanism into the inference framework. Attention can indicate the relative importance of data features for a dataset and provide fine-grained explainability. Our extensive evaluations through quantitative metric evaluations, case studies, and user interviews demonstrate the effectiveness of AdaVis.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Songheng Zhang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yong Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Haotian Li"},{"affiliations":"","email":"","is_corresponding":false,"name":"Huamin Qu"}],"award":"","doi":"10.1109/TVCG.2023.3316469","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233316469","image_caption":"The figures show four pairs of visualizations recommended for four different datasets. Visualizations in the same column are for the same dataset. 
The explanation of the recommendation results is at the bottom. The top two features are described in the explanations to illustrate the recommendation results.","keywords":["Visualization Recommendation, Logical Reasoning, Data Visualization, Knowledge Graph"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/NWNMgWnT7NM&t=0h1m11s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233316469/v-tvcg-20233316469_Preview.mp4?token=wCwBEW6oJAqCGaF_7k5HslvCKP45i5gj1POdYH9qKtc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233316469/v-tvcg-20233316469_Preview.srt?token=djcHDOkGOsLAPdzbvC_3CO12Lcv1PjuJHjIGl_j0bV4&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full19","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Recommendation","session_uid":"v-tvcg","session_youtube_ff_id":"84XqN9j09X0","session_youtube_ff_link":"https://youtu.be/84XqN9j09X0","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/NWNMgWnT7NM&t=0h1m11s","sessions":["Visualization Recommendation"],"time_stamp":"2024-10-17T12:30:00Z","title":"AdaVis: Adaptive and Explainable Visualization Recommendation for Tabular Data'","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243374571","abstract":"Visualization Recommendation Systems (VRSs) are a novel and challenging field of study aiming to help generate insightful visualizations from data and support non-expert users in information discovery. Among the many contributions proposed in this area, some systems embrace the ambitious objective of imitating human analysts to identify relevant relationships in data and make appropriate design choices to represent these relationships with insightful charts. We denote these systems as \"agnostic\" VRSs since they do not rely on human-provided constraints and rules but try to learn the task autonomously. Despite the high application potential of agnostic VRSs, their progress is hindered by several obstacles, including the absence of standardized datasets to train recommendation algorithms, the difficulty of learning design rules, and defining quantitative criteria for evaluating the perceptual effectiveness of generated plots. This paper summarizes the literature on agnostic VRSs and outlines promising future research directions.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Luca Podo"},{"affiliations":"","email":"","is_corresponding":false,"name":"Bardh Prenkaj"},{"affiliations":"","email":"","is_corresponding":false,"name":"Paola Velardi"}],"award":"","doi":"10.1109/TVCG.2024.3374571","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243374571","image_caption":"Workflow of Agnostic Visual Recommender Systems (A-VRSs): First (Figure 1 up), the model is trained with data-visualization pairs, to learn both to identify relevant relationships between data and to visualize them in the best possible way. 
Next (Figure 1 down), the learned model recommends a set of possibly insightful visualizations from new datasets at inference time.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/NWNMgWnT7NM&t=0h26m44s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243374571/v-tvcg-20243374571_Preview.mp4?token=KLfl87EHjbGIpm2yPdsmIpwf2cM0nXpQgl2CbVHhXKA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243374571/v-tvcg-20243374571_Preview.srt?token=K44xpC8trhsmHcAEor-rRfBqY382d4LraLLQEK7M2gY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full19","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Recommendation","session_uid":"v-tvcg","session_youtube_ff_id":"qDYK_aAqIW8","session_youtube_ff_link":"https://youtu.be/qDYK_aAqIW8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/NWNMgWnT7NM&t=0h26m44s","sessions":["Visualization Recommendation"],"time_stamp":"2024-10-17T13:06:00Z","title":"Agnostic Visual Recommendation Systems: Open Challenges and Future Directions","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243383089","abstract":"The advances in AI-enabled techniques have accelerated the creation and automation of visualizations in the past decade. However, presenting visualizations in a descriptive and generative format remains a challenge. Moreover, current visualization embedding methods focus on standalone visualizations, neglecting the importance of contextual information for multi-view visualizations. To address this issue, we propose a new representation model, Chart2Vec, to learn a universal embedding of visualizations with context-aware information. Chart2Vec aims to support a wide range of downstream visualization tasks such as recommendation and storytelling. Our model considers both structural and semantic information of visualizations in declarative specifications. To enhance the context-aware capability, Chart2Vec employs multi-task learning on both supervised and unsupervised tasks concerning the cooccurrence of visualizations. We evaluate our method through an ablation study, a user study, and a quantitative comparison. The results verified the consistency of our embedding method with human cognition and showed its advantages over existing methods.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Qing Chen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ying Chen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ruishi Zou"},{"affiliations":"","email":"","is_corresponding":false,"name":"Wei Shuai"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yi Guo"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jiazhe Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Nan Cao"}],"award":"","doi":"10.1109/TVCG.2024.3383089","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243383089","image_caption":"To capture the information of a single visualization, we designed the Chart2Vec model. 
The input embedding module transforms the raw data into a vector format containing both fact schema and fact semantics, the encoder module then employs feature pooling and feature fusion to achieve the final vector representation. ","keywords":["Representation Learning, Multi-view Visualization, Visual Storytelling, Visualization Embedding"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2306.08304","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/NWNMgWnT7NM&t=0h13m19s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243383089/v-tvcg-20243383089_Preview.mp4?token=YaziDZQuh9K5a72_sHE8lgKpS45Ez70snplKQ4wqrAI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243383089/v-tvcg-20243383089_Preview.srt?token=f81u_zS7B44sxxCqfko0XEpX-AvGZ0OHaZCSIa6_lIg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full19","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Recommendation","session_uid":"v-tvcg","session_youtube_ff_id":"b1lGAY8V3S4","session_youtube_ff_link":"https://youtu.be/b1lGAY8V3S4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/NWNMgWnT7NM&t=0h13m19s","sessions":["Visualization Recommendation"],"time_stamp":"2024-10-17T12:54:00Z","title":"Chart2Vec: A Universal Embedding of Context-Aware Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1059","abstract":"Digital twin models are of high interest to Head and Neck Cancer (HNC) oncologists, who have to navigate a series of complex treatment decisions that weigh the efficacy of tumor control against toxicity and mortality risks. Evaluating individual risk profiles necessitates a deeper understanding of the interplay between different factors such as patient health, spatial tumor location and spread, and risk of subsequent toxicities that can not be adequately captured through simple heuristics. To support clinicians in better understanding tradeoffs when deciding on treatment courses, we developed DITTO, a digital-twin and visual computing system that allows clinicians to analyze detailed risk profiles for each patient, and decide on a treatment plan. DITTO relies on a sequential Deep Reinforcement Learning digital twin (DT) to deliver personalized risk of both long-term and short-term disease outcome and toxicity risk for HNC patients. Based on a participatory collaborative design alongside oncologists, we also implement several visual explainability methods to promote clinical trust and encourage healthy skepticism when using our system. We evaluate the efficacy of DITTO through quantitative evaluation of performance and case studies with qualitative feedback. 
Finally, we discuss design lessons for developing clinical visual XAI applications for clinical end users.","accessible_pdf":false,"authors":[{"affiliations":["University of Illinois at Chicago, Chicago, United States"],"email":"awentze2@uic.edu","is_corresponding":true,"name":"Andrew Wentzel"},{"affiliations":["University of Houston, Houston, United States"],"email":"skattia@mdanderson.org","is_corresponding":false,"name":"Serageldin Attia"},{"affiliations":["University of Illinois Chicago, Chicago, United States"],"email":"zhangz@uic.edu","is_corresponding":false,"name":"Xinhua Zhang"},{"affiliations":["University of Iowa, Iowa City, United States"],"email":"guadalupe-canahuate@uiowa.edu","is_corresponding":false,"name":"Guadalupe Canahuate"},{"affiliations":["University of Texas, Houston, United States"],"email":"cdfuller@mdanderson.org","is_corresponding":false,"name":"Clifton David Fuller"},{"affiliations":["University of Illinois at Chicago, Chicago, United States"],"email":"g.elisabeta.marai@gmail.com","is_corresponding":false,"name":"G. Elisabeta Marai"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1059","image_caption":"Overview of DITTO. (A) Input panel to alter model parameters and input patient features. (B) Temporal outcome risk plots for the patient based on different models and treatment groups. (C) Treatment recommendation based on the twin model and similar patients. (D) Auxiliary data panel, currently showing a waterfall plot of how each feature cumulatively contributes to the model decision.","keywords":["Medicine; Machine Learning; Application Domains; High Dimensional data; Spatial Data; Activity Centered Design"],"open_access_supplemental_link":"https://osf.io/qhu7f/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://doi.org/10.48550/arXiv.2407.13107","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/0TNqponA2lk&t=1h3m8s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1059/v-full-1059_Preview.mp4?token=36Loh-7OLpLaxVBBBoJ5FVLn6dJxoh991w5oHDdNbKQ&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full2","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Industry, Computing, and Medicine","session_uid":"v-full","session_youtube_ff_id":"4AmQkVSrVdE","session_youtube_ff_link":"https://youtu.be/4AmQkVSrVdE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/0TNqponA2lk&t=1h3m8s","sessions":["Applications: Industry, Computing, and Medicine"],"time_stamp":"2024-10-17T18:45:00Z","title":"DITTO: A Visual Digital Twin for Interventions and Temporal Treatment Outcomes in Head and Neck Cancer","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1805","abstract":"The optimization of cooling systems is important in many cases, for example for cabin and battery cooling in electric cars. 
Such an optimization is governed by multiple, conflicting objectives and it is performed across a multi-dimensional parameter space.The extent of the parameter space, the complexity of the non-linear model of the system,as well as the time needed per simulation run and factors that are not modeled in the simulation necessitate an iterative, semi-automatic approach. We present an interactive visual optimization approach, where the user works with a p-h diagram to steer an iterative, guided optimization process. A deep learning (DL) model provides estimates for parameters, given a target characterization of the system, while numerical simulation is used to compute system characteristics for an ensemble of parameter sets. Since the DL model only serves as an approximation of the inverse of the cooling system and since target characteristics can be chosen according to different, competing objectives, an iterative optimization process is realized, developing multiple sets of intermediate solutions, which are visually related to each other.The standard p-h diagram, integrated interactively in this approach, is complemented by a dual, also interactive visual representation of additional expressive measures representing the system characteristics. We show how the known four-points semantic of the p-h diagram meaningfully transfers to the dual data representation.When evaluating this approach in the automotive domain, we found that our solution helped with the overall comprehension of the cooling system and that it lead to a faster convergence during optimization. ","accessible_pdf":false,"authors":[{"affiliations":["VRVis Research Center, Vienna, Austria"],"email":"splechtna@vrvis.at","is_corresponding":true,"name":"Rainer Splechtna"},{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"behravan@vt.edu","is_corresponding":false,"name":"Majid Behravan"},{"affiliations":["AVL AST doo, Zagreb, Croatia"],"email":"mario.jelovic@avl.com","is_corresponding":false,"name":"Mario Jelovic"},{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"gracanin@vt.edu","is_corresponding":false,"name":"Denis Gracanin"},{"affiliations":["University of Bergen, Bergen, Norway"],"email":"helwig.hauser@uib.no","is_corresponding":false,"name":"Helwig Hauser"},{"affiliations":["VRVis Research Center, Vienna, Austria"],"email":"matkovic@vrvis.at","is_corresponding":false,"name":"Kresimir Matkovic"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1805","image_caption":"The interactive p-h diagram, central to interactive design of experiments for cooling systems, presents multiple layers of information: user-defined desired points (in shades of red), simulated points generated by parameters predicted through deep learning (shades of blue), and scatterplots offering a dual data perspective (with lines connecting Deep Learning prediction and simulation for the same parameters). 
","keywords":["Parameter space exploration"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.12607","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/0TNqponA2lk&t=0h38m51s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1805/v-full-1805_Preview.mp4?token=vbIksADqVloSgv_7CEcnLkzX0sf30vsoN5mp0KBDjH0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1805/v-full-1805_Preview.srt?token=4CbReupRi8r5i9I_rOR3J4R7GGd_rHw_7goN2_bpA14&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full2","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Industry, Computing, and Medicine","session_uid":"v-full","session_youtube_ff_id":"zGpaBxAqkHw","session_youtube_ff_link":"https://youtu.be/zGpaBxAqkHw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/0TNqponA2lk&t=0h38m51s","sessions":["Applications: Industry, Computing, and Medicine"],"time_stamp":"2024-10-17T18:21:00Z","title":"Interactive Design-of-Experiments: Optimizing a Cooling System","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1865","abstract":"In medical diagnostics of both early disease detection and routine patient care, particle-based contamination of in-vitro diagnostics consumables poses a significant threat to patients. Objective data-driven decision-making on the severity of contamination is key for reducing patient risk, while saving time and cost in quality assessment. Our collaborators introduced us to their quality control process, including particle data acquisition through image recognition, feature extraction, and attributes reflecting the production context of particles. Shortcomings in the current process are limitations in exploring thousands of images, data-driven decision making, and ineffective knowledge externalization. Following the design study methodology, our contributions are a characterization of the problem space and requirements, the development and validation of DaedalusData, a comprehensive discussion of our study's learnings, and a generalizable framework for knowledge externalization. DaedalusData is a visual analytics system that enables domain experts to explore particle contamination patterns, label particles in label alphabets, and externalize knowledge through semi-supervised label-informed data projections. The results of our case study and user study show high usability of DaedalusData and its efficient support of experts in generating comprehensive overviews of thousands of particles, labeling of large quantities of particles, and externalizing knowledge to augment the dataset further. 
Reflecting on our approach, we discuss insights on dataset augmentation via human knowledge externalization, and on the scalability and trade-offs that come with the adoption of this approach in practice.","accessible_pdf":false,"authors":[{"affiliations":["Roche pRED, Basel, Switzerland","University of Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"alexander.wyss@protonmail.com","is_corresponding":false,"name":"Alexander Wyss"},{"affiliations":["University of Zurich, Zurich, Switzerland","Digital Society Initiativ, Zurich, Switzerland"],"email":"gab.morgenshtern@gmail.com","is_corresponding":false,"name":"Gabriela Morgenshtern"},{"affiliations":["Roche Diagnostics International, Rotkreuz, Switzerland"],"email":"a.hirschhuesler@gmail.com","is_corresponding":false,"name":"Amanda Hirsch-H\u00fcsler"},{"affiliations":["University of Zurich, Zurich, Switzerland","Digital Society Initiativ, Zurich, Switzerland"],"email":"bernard@ifi.uzh.ch","is_corresponding":false,"name":"J\u00fcrgen Bernard"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1865","image_caption":"The DaedalusData framework supports two control modes for experts to steer the particle display with, shown here as a 2 \u00d7 2 matrix. Vertical: Experts choose between the Attribute View (for one attribute) and the Projection View (for multiple user-specified attributes) to identify areas of interest, and discover similar particles to label. Horizontal: Experts choose to explore either the Pre-Existing Data Attributes (the Image & Production Context), or to extend the exploration to Augmented Data Attributes created through particle labeling (Expert Knowledge). This design study implements a systematic cross-cut of all four types of control, addressing expert-contributed design requirements. 
","keywords":["Visual Analytics, Image Data, Knowledge Externalization, Data Labeling, Anomaly Detection, Medical Manufacturing"],"open_access_supplemental_link":"https://github.com/alexv710/DaedalusData---IEEE-VIS-Supplemental","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.04749","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/0TNqponA2lk&t=0h50m52s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1865/v-full-1865_Preview.mp4?token=0EQEAR0eBMnTqI4vpUWiBIPLB73GJ3Ed4bw9fMtXw0A&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1865/v-full-1865_Preview.srt?token=CqKfM7RMtFtdxiCe3pzjkNZ41jjLObsXxcnKWjbAfLI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full2","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Industry, Computing, and Medicine","session_uid":"v-full","session_youtube_ff_id":"TUuS_IaBoRg","session_youtube_ff_link":"https://youtu.be/TUuS_IaBoRg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/0TNqponA2lk&t=0h50m52s","sessions":["Applications: Industry, Computing, and Medicine"],"time_stamp":"2024-10-17T18:33:00Z","title":"DaedalusData: Exploration, Knowledge Externalization and Labeling of Particles in Medical Manufacturing - A Design Study","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233332999","abstract":"Quantum computing offers significant speedup compared to classical computing, which has led to a growing interest among users in learning and applying quantum computing across various applications. However, quantum circuits, which are fundamental for implementing quantum algorithms, can be challenging for users to understand due to their underlying logic, such as the temporal evolution of quantum states and the effect of quantum amplitudes on the probability of basis quantum states. To fill this research gap, we propose QuantumEyes, an interactive visual analytics system to enhance the interpretability of quantum circuits through both global and local levels. For the global-level analysis, we present three coupled visualizations to delineate the changes of quantum states and the underlying reasons: a Probability Summary View to overview the probability evolution of quantum states; a State Evolution View to enable an in-depth analysis of the influence of quantum gates on the quantum states; a Gate Explanation View to show the individual qubit states and facilitate a better understanding of the effect of quantum gates. For the local-level analysis, we design a novel geometrical visualization dandelion chart to explicitly reveal how the quantum amplitudes affect the probability of the quantum state. We thoroughly evaluated QuantumEyes as well as the novel dandelion chart integrated into it through two case studies on different types of quantum algorithms and in-depth expert interviews with 12 domain experts. 
The results demonstrate the effectiveness and usability of our approach in enhancing the interpretability of quantum circuits.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Shaolun Ruan"},{"affiliations":"","email":"","is_corresponding":false,"name":"Qiang Guan"},{"affiliations":"","email":"","is_corresponding":false,"name":"Paul Griffin"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ying Mao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yong Wang"}],"award":"","doi":"10.1109/TVCG.2023.3332999","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233332999","image_caption":"We propose QuantumEyes, an interactive visualization system to enhance the interpretability of general quantum circuits, with the integration of a visual design called Dandelion Chart to explain the quantum states regarding the probability and amplitudes of each basis states.","keywords":["Data visualization, design study, interpretability, quantum computing."],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2311.07980","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/0TNqponA2lk&t=0h14m2s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233332999/v-tvcg-20233332999_Preview.mp4?token=KYsEFgeyQN1AgvlKSQJWtfU9VAyZfND8R6jfQJvuAe8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233332999/v-tvcg-20233332999_Preview.srt?token=IKfsu3IkAuw3dU6k3iBiR4xWulNiC-UvBTMwRRQGTXo&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full2","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Industry, Computing, and Medicine","session_uid":"v-tvcg","session_youtube_ff_id":"SPYRqbzGtdA","session_youtube_ff_link":"https://youtu.be/SPYRqbzGtdA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/0TNqponA2lk&t=0h14m2s","sessions":["Applications: Industry, Computing, and Medicine"],"time_stamp":"2024-10-17T17:57:00Z","title":"QuantumEyes: Towards Better Interpretability of Quantum Circuits","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233337173","abstract":"Visualization design studies bring together visualization researchers and domain experts to address yet unsolved data analysis challenges stemming from the needs of the domain experts. Typically, the visualization researchers lead the design study process and implementation of any visualization solutions. This setup leverages the visualization researchers' knowledge of methodology, design, and programming, but the availability to synchronize with the domain experts can hamper the design process. We consider an alternative setup where the domain experts take the lead in the design study, supported by the visualization experts. In this study, the domain experts are computer architecture experts who simulate and analyze novel computer chip designs. These chips rely on a Network-on-Chip (NOC) to connect components. The experts want to understand how the chip designs perform and what in the design led to their performance. 
To aid this analysis, we develop Vis4Mesh, a visualization system that provides spatial, temporal, and architectural context to simulated NOC behavior. Integration with an existing computer architecture visualization tool enables architects to perform deep-dives into specific architecture component behavior. We validate Vis4Mesh through a case study and a user study with computer architecture researchers. We reflect on our design and process, discussing advantages, disadvantages, and guidance for engaging in a domain expert-led design studies.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Shaoyu Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Hang Yan"},{"affiliations":"","email":"","is_corresponding":false,"name":"Katherine E. Isaacs"},{"affiliations":"","email":"","is_corresponding":true,"name":"Yifan Sun"}],"award":"","doi":"10.1109/TVCG.2023.3337173","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233337173","image_caption":"Vis4Mesh is a tool that allows computer architects to find architectural cause of the performance issues on a Network-on-Chip system.","keywords":["Data Visualization, Design Study, Network-on-Chip, Performance Analysis"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://www.researchgate.net/publication/376004885_Visual_Exploratory_Analysis_for_Designing_Large-Scale_Network-on-Chip_Architectures_A_Domain_Expert-Led_Design_Study","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/0TNqponA2lk&t=0h0m45s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233337173/v-tvcg-20233337173_Preview.mp4?token=nO7EurtSmn1LF4IwC6akfQu2SLnIRyYXJaFKpQAU-4U&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233337173/v-tvcg-20233337173_Preview.srt?token=EAjXUyLl1KOQBaHfR23V63VuNlLF4OB3xb9u1pCm0iY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full2","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Industry, Computing, and Medicine","session_uid":"v-tvcg","session_youtube_ff_id":"BqQmgA_KYII","session_youtube_ff_link":"https://youtu.be/BqQmgA_KYII","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/0TNqponA2lk&t=0h0m45s","sessions":["Applications: Industry, Computing, and Medicine"],"time_stamp":"2024-10-17T17:45:00Z","title":"Visual Exploratory Analysis for Designing Large-Scale Network-on-Chip Architectures: A Domain Expert-Led Design Study","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243382607","abstract":"Advanced manufacturing creates increasingly complex objects with material compositions that are often difficult to characterize by a single modality. Our domain scientists are going beyond traditional methods by employing both X-ray and neutron computed tomography to obtain complementary representations expected to better resolve material boundaries. However, the use of two modalities creates its own challenges for visualization, requiring either complex adjustments of multimodal transfer functions or the need for multiple views. 
Together with experts in nondestructive evaluation, we designed a novel interactive multimodal visualization approach to create a combined view of the co-registered X-ray and neutron acquisitions of industrial objects. Using an automatic topological segmentation of the bivariate histogram of X-ray and neutron values as a starting point, the system provides a simple yet effective interface to easily create, explore, and adjust a multimodal isualization. We propose a widget with simple brushing interactions that enables the user to quickly correct the segmented histogram results. Our semiautomated system enables domain experts to intuitively explore large multimodal datasets without the need for either advanced segmentation algorithms or knowledge of visualization echniques. We demonstrate our approach using synthetic examples, industrial phantom objects created to stress multimodal scanning techniques, and real-world objects, and we discuss expert feedback.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Huang, Xuan"},{"affiliations":"","email":"","is_corresponding":false,"name":"Miao, Haichao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Kim, Hyojin"},{"affiliations":"","email":"","is_corresponding":false,"name":"Townsend, Andrew"},{"affiliations":"","email":"","is_corresponding":false,"name":"Champley, Kyle"},{"affiliations":"","email":"","is_corresponding":false,"name":"Tringe, Joseph"},{"affiliations":"","email":"","is_corresponding":false,"name":"Pascucci, Valerio"},{"affiliations":"","email":"","is_corresponding":false,"name":"Bremer, Peer-Timo"}],"award":"","doi":"10.1109/TVCG.2024.3382607","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243382607","image_caption":"The X-Ray and neutron computed tomography industrial object XR05, consisting of multiple materials and intrinsic structures. 
With a morse-complex based segmentation (bottom left) on the bivariate histogram combing two modalities (top left), we present an efficient yet flexible system for examining material compositions (right).","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.11957","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/0TNqponA2lk&t=0h27m7s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243382607/v-tvcg-20243382607_Preview.mp4?token=tXtWD-Fz0i8u7eIfUdgouiGgwZ7L-nJDcmSmrOsmXGY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243382607/v-tvcg-20243382607_Preview.srt?token=aqpJ-RnPiFguRY97IwuBpLjy2Yk82W6DC49AwGefdOI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full2","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Applications: Industry, Computing, and Medicine","session_uid":"v-tvcg","session_youtube_ff_id":"sGc9lmaxeHI","session_youtube_ff_link":"https://youtu.be/sGc9lmaxeHI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/0TNqponA2lk&t=0h27m7s","sessions":["Applications: Industry, Computing, and Medicine"],"time_stamp":"2024-10-17T18:09:00Z","title":"Bimodal Visualization of Industrial X-ray and Neutron Computed Tomography Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1100","abstract":"``Correlation does not imply causation'' is a famous mantra in statistical and visual analysis. However, consumers of visualizations often draw causal conclusions when only correlations between variables are shown. In this paper, we investigate factors that contribute to causal relationships users perceive in visualizations. We collected a corpus of concept pairs from variables in widely used datasets and created visualizations that depict varying correlative associations using three typical statistical chart types. We conducted two MTurk studies on (1) preconceived notions on causal relations without charts, and (2) perceived causal relations with charts, for each concept pair. Our results indicate that people make assumptions about causal relationships between pairs of concepts even without seeing any visualized data. Moreover, our results suggest that these assumptions constitute causal priors that, in combination with visualized association, impact how data visualizations are interpreted. The results also suggest that causal priors may lead to over- or under-estimation in perceived causal relations in different circumstances, and that those priors can also impact users' confidence in their causal assessments. In addition, our results align with prior work, indicating that chart type may also affect causal inference. Using data from the studies, we develop a model to capture the interaction between causal priors and visualized associations as they combine to impact a user's perceived causal relations. In addition to reporting the study results and analyses, we provide an open dataset of causal priors for 56 specific concept pairs that can serve as a potential benchmark for future studies. 
We also suggest remaining challenges and heuristic-based guidelines to help designers improve visualization design choices to better support visual causal inference.","accessible_pdf":false,"authors":[{"affiliations":["University of North Carolina-Chapel Hill, Chapel Hill, United States"],"email":"zeyuwang@cs.unc.edu","is_corresponding":true,"name":"Arran Zeyu Wang"},{"affiliations":["UNC-Chapel Hill, Chapel Hill, United States"],"email":"borland@renci.org","is_corresponding":false,"name":"David Borland"},{"affiliations":["Davidson College, Davidson, United States"],"email":"tapeck@davidson.edu","is_corresponding":false,"name":"Tabitha C. Peck"},{"affiliations":["University of North Carolina, Chapel Hill, United States"],"email":"vaapad@live.unc.edu","is_corresponding":false,"name":"Wenyuan Wang"},{"affiliations":["University of North Carolina, Chapel Hill, United States"],"email":"gotz@unc.edu","is_corresponding":false,"name":"David Gotz"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1100","image_caption":"Results of participant-rated causal relationships for 56 concept pairs from open-source datasets. Participants rated the causal impact of X on Y for each pair on a scale of 1 to 5. The Y-axis in (a) shows these scores, ordered by mean causal relation on the X-axis with 95% confidence intervals. The light blue band represents the mean score +/- one standard deviation (SD). Vertical dashed lines indicate low (mean+SD) causal priors. (b) presents heat maps for four example pairs, showing participant scores. The study highlights the variability in causal priors and their impact on visualization interpretation.","keywords":["Causal inference, Perception and cognition, Causal prior, Association, Causality, Visualization"],"open_access_supplemental_link":"https://osf.io/dfkv4/?view_only=f84ffbc28cdf45e5a3d68f2f1e9c8427","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/rSvwf4L8jPc&t=0h36m56s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1100/v-full-1100_Preview.mp4?token=-tczNKjms6dku05m_c2tjjPtjHG0S_SDlRk3Z_2yHXI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1100/v-full-1100_Preview.srt?token=ovnAtbhGlSfYKYC6CnnCq5286IIhpEuvR4Hq-UFGsoI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full20","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Judgment and Decision-making","session_uid":"v-full","session_youtube_ff_id":"-9MypSwTv8w","session_youtube_ff_link":"https://youtu.be/-9MypSwTv8w","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/rSvwf4L8jPc&t=0h36m56s","sessions":["Judgment and Decision-making"],"time_stamp":"2024-10-16T14:51:00Z","title":"Causal Priors and Their Influence on Judgements of Causality in Visualized Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1202","abstract":"The Dunning-Kruger Effect (DKE) is a metacognitive phenomenon where low-skilled individuals tend to overestimate their competence while high-skilled individuals tend to underestimate their competence. 
This effect has been observed in a number of domains including humor, grammar, and logic. In this paper, we explore if and how DKE manifests in visual reasoning and judgment tasks. Across two online user studies involving (1) a sliding puzzle game and (2) a scatterplot-based categorization task, we demonstrate that individuals are susceptible to DKE in visual reasoning and judgment tasks: those who performed best underestimated their performance, while bottom performers overestimated their performance. In addition, we contribute novel analyses that correlate susceptibility of DKE with personality traits and user interactions. Our findings pave the way for novel modes of bias detection via interaction patterns and establish promising directions towards interventions tailored to an individual\u2019s personality traits. All materials and analyses are in supplemental materials: https://github.com/CAV-Lab/DKE_supplemental.git.","accessible_pdf":false,"authors":[{"affiliations":["Emory University, Atlanta, United States"],"email":"mengyu.chen@emory.edu","is_corresponding":true,"name":"Mengyu Chen"},{"affiliations":["Emory University, Atlanta, United States"],"email":"yijun.liu2@emory.edu","is_corresponding":false,"name":"Yijun Liu"},{"affiliations":["Emory University, Atlanta, United States"],"email":"emily.wall@emory.edu","is_corresponding":false,"name":"Emily Wall"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1202","image_caption":"We replicated the Dunning-Kruger Effect (DKE) across tasks involving visual reasoning and judgment. We observed a typical DKE pattern, where highly skilled people tend to underestimate their performance, while those with lower skills often overestimate it. 
Additionally, we explored potential indicators of DKE, including participants\u2019 interactions, personality traits, and domain familiarity, and identified several factors related to DKE.","keywords":["Cognitive Bias, Dunning Kruger Effect, Metacognition, Personality Traits, Interactions, Visual Reasoning"],"open_access_supplemental_link":"https://github.com/CAV-Lab/DKE_supplemental.git","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/rSvwf4L8jPc&t=0h13m38s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1202/v-full-1202_Preview.mp4?token=1Usg6-YU7WtR6uxtp0I104Kebbawx3X1E4TIwbqbnMI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1202/v-full-1202_Preview.srt?token=nKO3RkoaNQ_z7MQ987vVLVedXUQgyfEKL8JUDHrbopY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full20","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Judgment and Decision-making","session_uid":"v-full","session_youtube_ff_id":"-paNXRpqH1E","session_youtube_ff_link":"https://youtu.be/-paNXRpqH1E","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/rSvwf4L8jPc&t=0h13m38s","sessions":["Judgment and Decision-making"],"time_stamp":"2024-10-16T14:27:00Z","title":"Unmasking Dunning-Kruger Effect in Visual Reasoning and Visual Data Analysis","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1256","abstract":"People commonly utilize visualizations not only to examine a given dataset, but also to draw generalizable conclusions about the underlying models or phenomena. Prior research has compared human visual inference to that of an optimal Bayesian agent, with deviations from rational analysis viewed as problematic. However, human reliance on non-normative heuristics may prove advantageous in certain circumstances. We investigate scenarios where human intuition might surpass idealized statistical rationality. In two experiments, we examine individuals\u2019 accuracy in characterizing the parameters of known data-generating models from bivariate visualizations. Our findings indicate that, although participants generally exhibited lower accuracy compared to statistical models, they frequently outperformed Bayesian agents, particularly when faced with extreme samples. Participants appeared to rely on their internal models to filter out noisy visualizations, thus improving their resilience against spurious data. However, participants displayed overconfidence and struggled with uncertainty estimation. They also exhibited higher variance than statistical machines. Our findings suggest that analyst gut reactions to visualizations may provide an advantage, even when departing from rationality. These results carry implications for designing visual analytics tools, offering new perspectives on how to integrate statistical models and analyst intuition for improved inference and decision-making. 
The data and materials for this paper are available at https://osf.io/qmfv6","accessible_pdf":false,"authors":[{"affiliations":["Indiana University, Indianapolis, United States"],"email":"rkoonch@iu.edu","is_corresponding":true,"name":"Ratanond Koonchanok"},{"affiliations":["Argonne National Laboratory, Lemont, United States","University of Illinois Chicago, Chicago, United States"],"email":"papka@anl.gov","is_corresponding":false,"name":"Michael E. Papka"},{"affiliations":["Indiana University, Indianapolis, United States"],"email":"redak@iu.edu","is_corresponding":false,"name":"Khairi Reda"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1256","image_caption":"In this paper, we compare the ability of humans and statistical models to characterize the mean and uncertainty of the data-generating model based on visualized samples. Our results indicate that humans can outperform statistical models when faced with extreme samples. ","keywords":["Visual inference, statistical rationality, human-machine collaboration"],"open_access_supplemental_link":"https://osf.io/qmfv6","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.16871","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/rSvwf4L8jPc&t=0h26m41s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1256/v-full-1256_Preview.mp4?token=ilWiqf4xF0Ne1wq9zXyw97jLPCm2BTpcsVHohWb1yEI&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full20","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Judgment and Decision-making","session_uid":"v-full","session_youtube_ff_id":"bj8YXso5ly0","session_youtube_ff_link":"https://youtu.be/bj8YXso5ly0","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/rSvwf4L8jPc&t=0h26m41s","sessions":["Judgment and Decision-making"],"time_stamp":"2024-10-16T14:39:00Z","title":"Trust Your Gut: Comparing Human and Machine Inference from Noisy Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233326698","abstract":"Researchers have derived many theoretical models for specifying users\u2019 insights as they interact with a visualization system. These representations are essential for understanding the insight discovery process, such as when inferring user interaction patterns that lead to insight or assessing the rigor of reported insights. However, theoretical models can be difficult to apply to existing tools and user studies, often due to discrepancies in how insight and its constituent parts are defined. This paper calls attention to the consistent structures that recur across the visualization literature and describes how they connect multiple theoretical representations of insight. We synthesize a unified formalism for insights using these structures, enabling a wider audience of researchers and developers to adopt the corresponding models. 
Through a series of theoretical case studies, we use our formalism to compare and contrast existing theories, revealing interesting research challenges in reasoning about a user's domain knowledge and leveraging synergistic approaches in data mining and data management research.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Leilani Battle"},{"affiliations":"","email":"","is_corresponding":false,"name":"Alvitta Ottley"}],"award":"","doi":"10.1109/TVCG.2023.3326698","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233326698","image_caption":"Inspired by existing definitions of insight, we present a unifying theory for the structure of insights discovered during visual analysis. The key idea is that an insight links analytic knowledge uncovered through data transformations/visualizations with the user's external domain knowledge. This core insight structure can then be adapted to form more complex insights, such as through further linking and nesting of existing insight objects. Informed by this theory, we contribute a toolkit named Pyxis for specifying insights in JavaScript code as well as motivating usage scenarios for Pyxis to advance future visualization theory, systems, and user studies.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/rSvwf4L8jPc&t=1h2m50s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233326698/v-tvcg-20233326698_Preview.mp4?token=obF0SbCwEZB17AEIa4zjAvqXcFfYXzOO1YmYzKe5g-4&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full20","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Judgment and Decision-making","session_uid":"v-tvcg","session_youtube_ff_id":"pih94nB6Mc4","session_youtube_ff_link":"https://youtu.be/pih94nB6Mc4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/rSvwf4L8jPc&t=1h2m50s","sessions":["Judgment and Decision-making"],"time_stamp":"2024-10-16T15:15:00Z","title":"What Do We Mean When We Say \u201cInsight\u201d? A Formal Synthesis of Existing Theory","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233346640","abstract":"Is it true that if citizens understand hurricane probabilities, they will make more rational decisions for evacuation? Finding answers to such questions is not straightforward in the literature because the terms \u201c judgment \u201d and \u201c decision making \u201d are often used interchangeably. This terminology conflation leads to a lack of clarity on whether people make suboptimal decisions because of inaccurate judgments of information conveyed in visualizations or because they use alternative yet currently unknown heuristics. To decouple judgment from decision making, we review relevant concepts from the literature and present two preregistered experiments (N=601) to investigate if the task (judgment vs. decision making), the scenario (sports vs. humanitarian), and the visualization (quantile dotplots, density plots, probability bars) affect accuracy. While experiment 1 was inconclusive, we found evidence for a difference in experiment 2. 
Contrary to our expectations and previous research, which found decisions less accurate than their direct-equivalent judgments, our results pointed in the opposite direction. Our findings further revealed that decisions were less vulnerable to status-quo bias, suggesting decision makers may disfavor responses associated with inaction. We also found that both scenario and visualization types can influence people's judgments and decisions. Although effect sizes are not large and results should be interpreted carefully, we conclude that judgments cannot be safely used as proxy tasks for decision making, and discuss implications for visualization research and beyond. Materials and preregistrations are available at https://osf.io/ufzp5/?view_only=adc0f78a23804c31bf7fdd9385cb264f.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Ba\u015fak Oral"},{"affiliations":"","email":"","is_corresponding":false,"name":"Pierre Dragicevic"},{"affiliations":"","email":"","is_corresponding":false,"name":"Alexandru Telea"},{"affiliations":"","email":"","is_corresponding":false,"name":"Evanthia Dimara"}],"award":"","doi":"10.1109/TVCG.2023.3346640","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233346640","image_caption":"The image shows a scale with a large question mark in the center, asking whether the two concepts are the same: 'Judgment,' symbolized by a magnifying glass on the left side, and 'Decision,' symbolized by a checklist on the right side.","keywords":["Data visualization, Task analysis, Decision making, Visualization, Bars, Sports, Terminology, Cognition, Decision Making, Judgment, Psychology, Visualization"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://hal.science/hal-04354869/file/OralDecoupling.pdf","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/rSvwf4L8jPc&t=0h0m48s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233346640/v-tvcg-20233346640_Preview.mp4?token=bFPCtI--QDmxBpAxwoJ95iAO_SLRw8i-Gmi9ezsctD4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233346640/v-tvcg-20233346640_Preview.srt?token=HQjTPEwxtlkgdZ20Y3hgLGQPO1SjBmvRuq8ERFTRpVU&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full20","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Judgment and Decision-making","session_uid":"v-tvcg","session_youtube_ff_id":"GojbFqP_xqs","session_youtube_ff_link":"https://youtu.be/GojbFqP_xqs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/rSvwf4L8jPc&t=0h0m48s","sessions":["Judgment and Decision-making"],"time_stamp":"2024-10-16T14:15:00Z","title":"Decoupling Judgment and Decision Making: A Tale of Two Tails","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233346713","abstract":"Recent growth in the popularity of large language models has led to their increased usage for summarizing, predicting, and generating text, making it vital to help researchers and engineers understand how and why they work. We present KnowledgeVIS , a human-in-the-loop visual analytics system for interpreting language models using fill-in-the-blank sentences as prompts. 
By comparing predictions between sentences, KnowledgeVIS reveals learned associations that intuitively connect what language models learn during training to natural language tasks downstream, helping users create and test multiple prompt variations, analyze predicted words using a novel semantic clustering technique, and discover insights using interactive visualizations. Collectively, these visualizations help users identify the likelihood and uniqueness of individual predictions, compare sets of predictions between prompts, and summarize patterns and relationships between predictions across all prompts. We demonstrate the capabilities of KnowledgeVIS with feedback from six NLP experts as well as three different use cases: (1) probing biomedical knowledge in two domain-adapted models; and (2) evaluating harmful identity stereotypes and (3) discovering facts and relationships between three general-purpose models.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Adam Coscia"},{"affiliations":"","email":"","is_corresponding":false,"name":"Alex Endert"}],"award":"","doi":"10.1109/TVCG.2023.3346713","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233346713","image_caption":"Evaluating generative LLMs for stereotypes and biases is hard. Fill-in-the-blank sentences as prompts can reveal biases, yet many fill-in-the-blank analysis methods are limited to one sentence at a time. Our solution, KnowledgeVIS, makes it easy to create multiple sentence prompts, then visually compare LLM predictions across sentences. We studied how KnowledgeVIS helps developers close the loop of LLM evaluation and contribute guidelines for improving human-in-the-loop NLP. KnowledgeVIS is open-source and live at: https://github.com/AdamCoscia/KnowledgeVIS. 
For the full story, please read our paper!","keywords":["Visual analytics, language models, prompting, interpretability, machine learning."],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/pdf/2403.04758","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/rSvwf4L8jPc&t=0h50m32s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233346713/v-tvcg-20233346713_Preview.mp4?token=f6g5iPvFyz17qSg3QF89xpvfwdqLRXE7iBD5fJRN3Vc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233346713/v-tvcg-20233346713_Preview.srt?token=s6wQXlGhwA-bODTFxNuCgLFi6qsuY_1vOZA_vJAaRZ0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full20","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Judgment and Decision-making","session_uid":"v-tvcg","session_youtube_ff_id":"OhiCpSl5jgs","session_youtube_ff_link":"https://youtu.be/OhiCpSl5jgs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/rSvwf4L8jPc&t=0h50m32s","sessions":["Judgment and Decision-making"],"time_stamp":"2024-10-16T15:03:00Z","title":"KnowledgeVIS: Interpreting Language Models by Comparing Fill-in-the-Blank Prompts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1142","abstract":"To deploy machine learning models on-device, practitioners use compression algorithms to shrink and speed up models while maintaining their high-quality output. A critical aspect of compression in practice is model comparison, including tracking many compression experiments, identifying subtle changes in model behavior, and negotiating complex accuracy-efficiency trade-offs. However, existing compression tools poorly support comparison, leading to tedious and, sometimes, incomplete analyses spread across disjoint tools. To support real-world comparative workflows, we develop an interactive visual system called Compress and Compare. Within a single interface, Compress and Compare surfaces promising compression strategies by visualizing provenance relationships between compressed models and reveals compression-induced behavior changes by comparing models\u2019 predictions, weights, and activations. We demonstrate how Compress and Compare supports common compression analysis tasks through two case studies, debugging failed compression on generative language models and identifying compression artifacts in image classification models. We further evaluate Compress and Compare in a user study with eight compression experts, illustrating its potential to provide structure to compression workflows, help practitioners build intuition about compression, and encourage thorough analysis of compression\u2019s effect on model behavior. 
Through these evaluations, we identify compression-specific challenges that future visual analytics tools should consider and Compress and Compare visualizations that may generalize to broader model comparison tasks.","accessible_pdf":true,"authors":[{"affiliations":["Massachusetts Institute of Technology, Cambridge, United States"],"email":"aboggust@mit.edu","is_corresponding":true,"name":"Angie Boggust"},{"affiliations":["Carnegie Mellon University, Pittsburgh, United States"],"email":"vsivaram@andrew.cmu.edu","is_corresponding":false,"name":"Venkatesh Sivaraman"},{"affiliations":["Apple, Cambridge, United States"],"email":"yassogba@gmail.com","is_corresponding":false,"name":"Yannick Assogba"},{"affiliations":["Apple, Seattle, United States"],"email":"donghao@apple.com","is_corresponding":false,"name":"Donghao Ren"},{"affiliations":["Apple, Pittsburgh, United States"],"email":"domoritz@cmu.edu","is_corresponding":false,"name":"Dominik Moritz"},{"affiliations":["Apple, Seattle, United States"],"email":"fred.hohman@gmail.com","is_corresponding":false,"name":"Fred Hohman"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1142","image_caption":"Compress and Compare helps ML practitioners analyze and compare compression experiments. The Model Map helps practitioners understand what experiments were run and find high-performing sequences of operations, while the Model Scatterplot and Selection Details views help compare accuracy and efficiency metrics quantitatively. Our paper describes the challenges that Compress and Compare addresses, how we designed the system, and a study with eight experts demonstrating its potential to support compression workflows.","keywords":["Efficient machine learning, model compression, visual analytics, model comparison"],"open_access_supplemental_link":"https://github.com/apple/ml-compress-and-compare","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/pdf/2408.03274","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/ESst2nxcXuA&t=0h53m30s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1142/v-full-1142_Preview.mp4?token=7bLX7TtYd-7Di9wJoeHtOTxkKehZFEDVJj3D4r93Uoo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1142/v-full-1142_Preview.srt?token=sApfAyEMxtKgQ5SzPXgHjhpIke2I3KKtKRVsbusIKtk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full21","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Model-checking and Validation","session_uid":"v-full","session_youtube_ff_id":"5tS7HFn5W6Y","session_youtube_ff_link":"https://youtu.be/5tS7HFn5W6Y","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ESst2nxcXuA&t=0h53m30s","sessions":["Model-checking and Validation"],"time_stamp":"2024-10-17T13:18:00Z","title":"Compress and Compare: Interactively Evaluating Efficiency and Behavior Across ML Model Compression Experiments","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1179","abstract":"Multi-objective evolutionary algorithms (MOEAs) have emerged as powerful tools for solving complex optimization problems characterized by multiple, often conflicting, objectives. 
While advancements have been made in computational efficiency as well as diversity and convergence of solutions, a critical challenge persists: the internal evolutionary mechanisms are opaque to human users. Drawing upon the successes of explainable AI in explaining complex algorithms and models, we argue that the need to understand the underlying evolutionary operators and population dynamics within MOEAs aligns well with a visual analytics paradigm. This paper introduces ParetoTracker, a visual analytics framework designed to support the comprehension and inspection of population dynamics in the evolutionary processes of MOEAs. Informed by preliminary literature review and expert interviews, the framework establishes a multi-level analysis scheme, which caters to user engagement and exploration ranging from examining overall trends in performance metrics to conducting fine-grained inspections of evolutionary operations. In contrast to conventional practices that require manual plotting of solutions for each generation, ParetoTracker facilitates the examination of temporal trends and dynamics across consecutive generations in an integrated visual interface. The effectiveness of the framework is demonstrated through case studies and expert interviews focused on widely adopted benchmark optimization problems.","accessible_pdf":false,"authors":[{"affiliations":["Southern University of Science and Technology, Shenzhen, China"],"email":"zhangzr32021@mail.sustech.edu.cn","is_corresponding":false,"name":"Zherui Zhang"},{"affiliations":["Southern University of Science and Technology, Shenzhen, China"],"email":"yangf2020@mail.sustech.edu.cn","is_corresponding":true,"name":"Fan Yang"},{"affiliations":["Southern University of Science and Technology, Shenzhen, China"],"email":"ranchengcn@gmail.com","is_corresponding":false,"name":"Ran Cheng"},{"affiliations":["Southern University of Science and Technology, Shenzhen, China"],"email":"mayx@sustech.edu.cn","is_corresponding":false,"name":"Yuxin Ma"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1179","image_caption":"We introduce ParetoTracker, a visual analytics framework designed to illustrate the dynamics of population generations within evolutionary processes of MOEAs, which consists of three main components: Performance Overview and Generation Statistics (A) Visual Exploration of Individuals among Generations (B) In-depth Visual Inspection of Operators (C).","keywords":["Visual analytics, multi-objective evolutionary algorithms, evolutionary computation"],"open_access_supplemental_link":"https://github.com/VIS-SUSTech/ParetoTracker","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.04539","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/ESst2nxcXuA&t=1h8m24s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1179/v-full-1179_Preview.mp4?token=HiWFcD9cnvwRDeNVeu8K30udpLarCgMmf95DedtPB38&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full21","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Model-checking and 
Validation","session_uid":"v-full","session_youtube_ff_id":"iExTSj-IaHc","session_youtube_ff_link":"https://youtu.be/iExTSj-IaHc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ESst2nxcXuA&t=1h8m24s","sessions":["Model-checking and Validation"],"time_stamp":"2024-10-17T13:30:00Z","title":"ParetoTracker: Understanding Population Dynamics in Multi-objective Evolutionary Algorithms through Visual Analytics","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1258","abstract":"Providing effective guidance for users has long been an important and challenging task for efficient exploratory visual analytics, especially when selecting variables for visualization in high-dimensional datasets. Correlation is the most widely applied metric for guidance in statistical and analytical tools, however a reliance on correlation may lead users towards false positives when interpreting causal relations in the data. In this work, inspired by prior insights on the benefits of counterfactual visualization in supporting visual causal inference, we propose a novel, simple, and efficient counterfactual guidance method to enhance causal inference performance in guided exploratory analytics based on insights and concerns gathered from expert interviews. Our technique aims to capitalize on the benefits of counterfactual approaches while reducing their complexity for users. We integrated counterfactual guidance into an exploratory visual analytics system, and using a synthetically generated ground-truth causal dataset, conducted a comparative user study and evaluated to what extent counterfactual guidance can help lead users to more precise visual causal inferences. The results suggest that counterfactual guidance improved visual causal inference performance, and also led to different exploratory behaviors compared to correlation-based guidance. Based on these findings, we offer future directions and challenges for incorporating counterfactual guidance to better support exploratory visual analytics.","accessible_pdf":false,"authors":[{"affiliations":["University of North Carolina-Chapel Hill, Chapel Hill, United States"],"email":"zeyuwang@cs.unc.edu","is_corresponding":true,"name":"Arran Zeyu Wang"},{"affiliations":["UNC-Chapel Hill, Chapel Hill, United States"],"email":"borland@renci.org","is_corresponding":false,"name":"David Borland"},{"affiliations":["University of North Carolina, Chapel Hill, United States"],"email":"gotz@unc.edu","is_corresponding":false,"name":"David Gotz"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1258","image_caption":"The proposed counterfactual guidance technique is compared with traditional correlation-based guidance through five scenarios. Using the example question \"Will coffee drinking cause differences in students' grades?\", an analyst might compare data based on coffee consumption and grade distributions. The leftmost column lists the subsets created, and charts illustrate five potential distribution combinations (a-e), suggesting different answers. Symbols at the bottom indicate which methods accurately interpret the data. 
Counterfactual-based approaches have advantages in two scenarios and perform equally in the other three.","keywords":["Counterfactual, Guidance, Exploratory visual analysis, Visual causal inference, Correlation"],"open_access_supplemental_link":"https://github.com/VACLab/Co-op","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/ESst2nxcXuA&t=0h0m47s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1258/v-full-1258_Preview.mp4?token=VYZkWSazuZO9S5NwEeptlwzOFMY2nnIEGDfaeGEKwyY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1258/v-full-1258_Preview.srt?token=pMJxvpJBUzRXIXcEYTAns27ai8gHIQp3yOwkNGtXc58&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full21","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Model-checking and Validation","session_uid":"v-full","session_youtube_ff_id":"xFxX4tX8KKM","session_youtube_ff_link":"https://youtu.be/xFxX4tX8KKM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ESst2nxcXuA&t=0h0m47s","sessions":["Model-checking and Validation"],"time_stamp":"2024-10-17T12:30:00Z","title":"Beyond Correlation: Incorporating Counterfactual Guidance to Better Support Exploratory Visual Analysis","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1309","abstract":"Visualizations play a critical role in validating and improving statistical models. However, the design space of model check visualizations is not well understood, making it difficult for authors to explore and specify effective graphical model checks. VMC defines a model check visualization using four components: (1) samples of distributions of checkable quantities generated from the model,including predictive distributions for new data and distributions of model parameters; (2) transformations on observed data to facilitate comparison; (3) visual representations of distributions; and (4) layouts to facilitate comparing model samples and observed data. We contribute an implementation of VMC as an R package. We validate VMC by reproducing a set of canonical model check examples, and show how using VMC to generate model checks reduces the edit distance between visualizations relative to existing visualization toolkits. 
The findings of an interview study with three expert modelers who used VMC highlight challenges and opportunities for encouraging exploration of correct, effective model check visualizations.","accessible_pdf":false,"authors":[{"affiliations":["Northwestern University, Evanston, United States"],"email":"ziyangguo1030@gmail.com","is_corresponding":true,"name":"Ziyang Guo"},{"affiliations":["University of Chicago, Chicago, United States"],"email":"kalea@uchicago.edu","is_corresponding":false,"name":"Alex Kale"},{"affiliations":["Northwestern University, Chicago, United States"],"email":"matthew.kay@gmail.com","is_corresponding":false,"name":"Matthew Kay"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"jhullman@northwestern.edu","is_corresponding":false,"name":"Jessica Hullman"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1309","image_caption":"Example model check visualizations authored with VMC, using data from [ 46 ]. From left to right: checks on the density curves of the distributions of model predictions and observed data from (A) response variable to (B) distributional parameter; follow-up checks conditional on the quantitative predictor, where VMC is used to specify (C) Hypothetical Outcome Plots and (D) a line + ribbon plot; (E) a facet check stratifying the random effects and (F) a multilevel check; more checks for the random effects specified by VMC, including (G) raincloud plots and (H) multiple-interval plots; and residual checks specified by VMC, including (I) residual plots revealing the heteroskedasticity of the model and (J) Q-Q plots, validating the normality of residuals.","keywords":["Model checking and evaluation; Uncertainty visualization; Grammar of Graphics"],"open_access_supplemental_link":"https://mucollective.github.io/vmc/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/ESst2nxcXuA&t=0h24m48s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1309/v-full-1309_Preview.mp4?token=_d8vt5BoHauPleUOak6d14mJ7U4ji1klBajeDY0bUv0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1309/v-full-1309_Preview.srt?token=rWwzC9Gl8Y-pZxPODj45nY8nXCPwX6GWxHTDBCMvW5k&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full21","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Model-checking and Validation","session_uid":"v-full","session_youtube_ff_id":"OqNLDTwT7DY","session_youtube_ff_link":"https://youtu.be/OqNLDTwT7DY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ESst2nxcXuA&t=0h24m48s","sessions":["Model-checking and Validation"],"time_stamp":"2024-10-17T12:54:00Z","title":"VMC: A Grammar for Visualizing Statistical Model Checks","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1547","abstract":"Visual validation of regression models in scatterplots is a common practice for assessing model quality, yet its efficacy remains unquantified. 
We conducted two empirical experiments to investigate individuals\u2019 ability to visually validate linear regression models (linear trends) and to examine the impact of common visualization designs on validation quality. The first experiment showed that the level of accuracy for visual estimation of slope (i.e., fitting a line to data) is higher than for visual validation of slope (i.e., accepting a shown line). Notably, we found bias toward slopes that are \u201ctoo steep\u201d in both cases. This led to novel insights that participants naturally assessed regression with orthogonal distances between the points and the line (i.e., ODR regression) rather than the common vertical distances (OLS regression). In the second experiment, we investigated whether incorporating common designs for regression visualization (error lines, bounding boxes, and confidence intervals) would improve visual validation. Even though error lines reduced validation bias, results failed to show the desired improvements in accuracy for any design. Overall, our findings suggest caution in using visual model validation for linear trends in scatterplots.","accessible_pdf":false,"authors":[{"affiliations":["University of Cologne, Cologne, Germany"],"email":"braun@cs.uni-koeln.de","is_corresponding":true,"name":"Daniel Braun"},{"affiliations":["Tufts University, Medford, United States"],"email":"remco@cs.tufts.edu","is_corresponding":false,"name":"Remco Chang"},{"affiliations":["University of Wisconsin - Madison, Madison, United States"],"email":"gleicher@cs.wisc.edu","is_corresponding":false,"name":"Michael Gleicher"},{"affiliations":["University of Cologne, Cologne, Germany"],"email":"landesberger@cs.uni-koeln.de","is_corresponding":false,"name":"Tatiana von Landesberger"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1547","image_caption":"\u201cVisual summary\u201d of visual validation and estimation accuracy for linear trends in scatterplots. 
The figure shows the true regression line (green) for OLS together with participants\u2019 average response for estimation (blue) and the range of lines with an acceptance rate of 50% or higher for validation (orange).","keywords":["Perception, visual model validation, visual model estimation, user study, information visualization"],"open_access_supplemental_link":"https://visva.cs.uni-koeln.de/en/publications/beware-of-validation-by-eye-visual-validation-of-linear-trends-in-scatterplots","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.11625","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/ESst2nxcXuA&t=0h15m14s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1547/v-full-1547_Preview.mp4?token=SSXo5mnJOmbYrf5k4_9Y7_5zEcT5DmSz-KJd7OTPcyo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1547/v-full-1547_Preview.srt?token=LLrihZt2TX9EdY69thgGKMo33P18_McHP5ewTqId8Cc&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full21","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Model-checking and Validation","session_uid":"v-full","session_youtube_ff_id":"-Ohr2rTpvXI","session_youtube_ff_link":"https://youtu.be/-Ohr2rTpvXI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ESst2nxcXuA&t=0h15m14s","sessions":["Model-checking and Validation"],"time_stamp":"2024-10-17T12:42:00Z","title":"Beware of Validation by Eye: Visual Validation of Linear Trends in Scatterplots","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233302308","abstract":"We visualize the predictions of multiple machine learning models to help biologists as they interactively make decisions about cell lineage---the development of a (plant) embryo from a single ovum cell. Based on a confocal microscopy dataset, traditionally biologists manually constructed the cell lineage, starting from this observation and reasoning backward in time to establish their inheritance. To speed up this tedious process, we make use of machine learning (ML) models trained on a database of manually established cell lineages to assist the biologist in cell assignment. Most biologists, however, are not familiar with ML, nor is it clear to them which model best predicts the embryo's development. We thus have developed a visualization system that is designed to support biologists in exploring and comparing ML models, checking the model predictions, detecting possible ML model mistakes, and deciding on the most likely embryo development. To evaluate our proposed system, we deployed our interface with six biologists in an observational study. 
Our results show that the visual representations of machine learning are easily understandable, and our tool, LineageD+, could potentially increase biologists' working efficiency and enhance the understanding of embryos.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Jiayi Hong"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ross Maciejewski"},{"affiliations":"","email":"","is_corresponding":false,"name":"Alain Trubuil"},{"affiliations":"","email":"","is_corresponding":false,"name":"Tobias Isenberg"}],"award":"","doi":"10.1109/TVCG.2023.3302308","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233302308","image_caption":"In this paper, we examine the human-AI interaction within the context of plant embryo lineage analysis. To facilitate this investigation, we developed a system called LineageD+, which visualizes predictions from multiple machine learning models. This system aims to assist biologists in reconstructing the development history of plant embryos.","keywords":["Visualization, visual analytics, machine learning, comparing ML predictions, human-AI teaming, plant biology, cell lineage"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://hal.science/hal-04212205","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/ESst2nxcXuA&t=0h41m31s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233302308/v-tvcg-20233302308_Preview.mp4?token=RobmTzM0OBSW6v7647_8RCVKa3CfNSnTAxYRFWKOzO0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233302308/v-tvcg-20233302308_Preview.srt?token=5D89AFZIBjrJvDk3bMTaK7lAJV1HGylEQCaJuUcqiKk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full21","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Model-checking and Validation","session_uid":"v-tvcg","session_youtube_ff_id":"reu4ziIvQYk","session_youtube_ff_link":"https://youtu.be/reu4ziIvQYk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/ESst2nxcXuA&t=0h41m31s","sessions":["Model-checking and Validation"],"time_stamp":"2024-10-17T13:06:00Z","title":"Visualizing and Comparing Machine Learning Predictions to Improve Human-AI Teaming on the Example of Cell Lineage","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1026","abstract":"We present a visual analytics approach for multi-level visual exploration of users' interaction strategies in an interactive digital environment. The use of interactive touchscreen exhibits in informal learning environments, such as museums and science centers, often incorporate frameworks that classify learning processes, such as Bloom\u2019s taxonomy, to achieve better user engagement and knowledge transfer. To analyze user behavior within these digital environments, interaction logs are recorded to capture diverse exploration strategies. However, analysis of such logs is challenging, especially in terms of coupling interactions and cognitive learning processes, and existing work within learning and educational contexts remains limited. 
To address these gaps, we develop a visual analytics approach for analyzing interaction logs that supports exploration at the individual user level and multi-user comparison. The approach utilizes algorithmic methods to identify similarities in users' interactions and reveal their exploration strategies. We motivate and illustrate our approach through an application scenario, using event sequences derived from interaction log data in an experimental study conducted with science center visitors from diverse backgrounds and demographics. The study involves 14 users completing tasks of increasing complexity, designed to stimulate different levels of cognitive learning processes. We implement our approach in an interactive visual analytics prototype system, named VISID, and together with domain experts, discover a set of task-solving exploration strategies, such as \"cascading\" and \"nested-loop\", which reflect different levels of learning processes from Bloom's taxonomy. Finally, we discuss the generalizability and scalability of the presented system and the need for further research with data acquired in the wild.","accessible_pdf":true,"authors":[{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"peilin.yu@liu.se","is_corresponding":true,"name":"Peilin Yu"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"aida.vitoria@liu.se","is_corresponding":false,"name":"Aida Nordman"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"marta.koc-januchta@liu.se","is_corresponding":false,"name":"Marta M. Koc-Januchta"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden","Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"konrad.schonborn@liu.se","is_corresponding":false,"name":"Konrad J Sch\u00f6nborn"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"lonni.besancon@gmail.com","is_corresponding":false,"name":"Lonni Besan\u00e7on"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"katerina.vrotsou@liu.se","is_corresponding":false,"name":"Katerina Vrotsou"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1026","image_caption":"The main components of VISID comprise an Individual View (upper) and a Comparison View (lower). The Individual View can be alternated to visualize: (a) a participant's attribute change and interaction event sequences, or (b) their interface event sequences representing the concurrently opened infopanels and their lifetime duration. The Comparison View consists of three parts. From left to right, it visualizes interaction sequences ranked by the similarity score to a baseline participant in descending order. The similarity score bars and delta values (middle) depict the similarity/dissimilarity with respect to the baseline participant. 
The Cluster View (right) shows potential clusters of similar participants.","keywords":["Visual analytics, Visualization systems and tools, Interaction logs, Visualization techniques, Visual learning"],"open_access_supplemental_link":"https://osf.io/wnz32/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://doi.org/10.31219/osf.io/4yc8s","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/M8JHVCnERRk&t=0h0m30s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1026/v-full-1026_Preview.mp4?token=MfxoMMoKp4WxUS5LilcXPAr5s9n5tgd6GhmKH7e3Zp4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1026/v-full-1026_Preview.srt?token=NwPulJzqSk6dZeJQIrKFDRhPWCOLO7ikFkIqpCl5Pxg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full22","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Time and Sequences","session_uid":"v-full","session_youtube_ff_id":"H9JJoBZBGNk","session_youtube_ff_link":"https://youtu.be/H9JJoBZBGNk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/M8JHVCnERRk&t=0h0m30s","sessions":["Time and Sequences"],"time_stamp":"2024-10-16T14:15:00Z","title":"Revealing Interaction Dynamics: Multi-Level Visual Exploration of User Strategies with an Interactive Digital Environment","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1642","abstract":"Despite the development of numerous visual analytics tools for event sequence data across various domains, including but not limited to healthcare, digital marketing, and user behavior analysis, comparing these domain-specific investigations and transferring the results to new datasets and problem areas remain challenging. Task abstractions can help us go beyond domain-specific details, but existing visualization task abstractions are insufficient for event sequence visual analytics because they primarily focus on multivariate datasets and often overlook automated analytical techniques. To address this gap, we propose a domain-agnostic multi-level task framework for event sequence analytics, derived from an analysis of 58 papers that present event sequence visualization systems. Our framework consists of four levels: objective, intent, strategy, and technique. Overall objectives identify the main goals of analysis. Intents comprises five high-level approaches adopted at each analysis step: augment data, simplify data, configure data, configure visualization, and manage provenance. Each intent is accomplished through a number of strategies, for instance, data simplification can be achieved through aggregation, summarization, or segmentation. Finally, each strategy can be implemented by a set of techniques depending on the input and output components. We further show that each technique can be expressed through a quartet of action-input-output-criteria. 
We demonstrate the framework\u2019s descriptive power through case studies and discuss its similarities and differences with previous event sequence task taxonomies.","accessible_pdf":false,"authors":[{"affiliations":["University of Maryland, College Park, College Park, United States"],"email":"kzintas@umd.edu","is_corresponding":true,"name":"Kazi Tasnim Zinat"},{"affiliations":["University of Maryland, College Park, United States"],"email":"ssakhamu@terpmail.umd.edu","is_corresponding":false,"name":"Saimadhav Naga Sakhamuri"},{"affiliations":["University of Maryland, College Park, United States"],"email":"achen151@terpmail.umd.edu","is_corresponding":false,"name":"Aaron Sun Chen"},{"affiliations":["University of Maryland, College Park, United States"],"email":"leozcliu@umd.edu","is_corresponding":false,"name":"Zhicheng Liu"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1642","image_caption":"From bigger picture to finer details Our four-tier framework consists of four levels: Objectives, Intents, Strategies, and Techniques, providing a common language to enhance cross-domain collaboration and tool evaluation.","keywords":["Task Abstraction, Event Sequence Data"],"open_access_supplemental_link":"https://osf.io/bkjsc/?view_only=b95871b8c4ae497ab9b6cb565e28edf5","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/M8JHVCnERRk&t=0h25m16s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1642/v-full-1642_Preview.mp4?token=6EC8R0dh9lTx_Yo63GE8xfzo74rMtLPiKE-sX05YXik&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1642/v-full-1642_Preview.srt?token=DCfmsMRFf68vDodSJcDYHRDq3OopfC9esYpGXacq6as&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full22","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Time and Sequences","session_uid":"v-full","session_youtube_ff_id":"4WP9eGQ_hwI","session_youtube_ff_link":"https://youtu.be/4WP9eGQ_hwI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/M8JHVCnERRk&t=0h25m16s","sessions":["Time and Sequences"],"time_stamp":"2024-10-16T14:39:00Z","title":"A Multi-Level Task Framework for Event Sequence Analysis","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243358919","abstract":"We conduct two in-lab experiments (N=93) to evaluate the effectiveness of Gantt charts, extended Gantt charts, and stringline charts for visualizing fixed-order event sequence data. We first formulate five types of event sequences and define three types of sequence elements: point events, interval events, and the temporal gaps between them. Our two experiments focus on event sequences with a pre-defined, fixed order, and measure task error rates and completion time. The first experiment shows single sequences and assesses the three charts' performance in comparing event duration or gap. The second experiment shows multiple sequences and evaluates how well the charts reveal temporal patterns. 
The results suggest that when visualizing single fixed-order event sequences, 1) Gantt and extended Gantt charts lead to comparable error rates in the duration-comparing task; 2) Gantt charts exhibit either shorter or equal completion time than extended Gantt charts; 3) both Gantt and extended Gantt charts demonstrate shorter completion times than stringline charts; 4) however, stringline charts outperform the other two charts with fewer errors in the comparing task when event type counts are high. Additionally, when visualizing multiple point-based fixed-order event sequences, stringline charts require less time than Gantt charts for people to find temporal patterns. Based on these findings, we discuss design opportunities for visualizing fixed-order event sequences and discuss future avenues for optimizing these charts.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Junxiu Tang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Fumeng Yang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jiang Wu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yifang Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jiayi Zhou"},{"affiliations":"","email":"","is_corresponding":false,"name":"Xiwen Cai"},{"affiliations":"","email":"","is_corresponding":false,"name":"Lingyun Yu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yingcai Wu"}],"award":"","doi":"10.1109/TVCG.2024.3358919","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243358919","image_caption":"In two lab experiments with 93 participants, we assessed the performance of Gantt, extended Gantt, and stringline charts for visualizing fixed-order event sequences. We introduced five event sequence types with point events, interval events, and temporal gaps. Experiment 1 focused on comparing event duration or gaps in single sequences, while Experiment 2 assessed pattern detection in multiple sequences. Results indicate Gantt and extended Gantt charts had similar error rates and faster completion times than stringline charts for single sequence. However, stringline charts were more accurate with numerous event types. 
For multiple sequences, stringline charts are quicker for pattern detection.","keywords":["Gantt chart, stringline chart, Marey's graph, event sequence, empirical study"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/zpdne","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/M8JHVCnERRk&t=0h49m0s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243358919/v-tvcg-20243358919_Preview.mp4?token=YP7eNz4W93N5GiZNwbr-U7L-UQEm6jMkbOb3v4o0YBQ&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243358919/v-tvcg-20243358919_Preview.srt?token=MxOnHDpxuV2ztsYfQDv6pLK3RFbuY1mUEGyHlXl0UCU&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full22","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Time and Sequences","session_uid":"v-tvcg","session_youtube_ff_id":"PTsFxQUWvIE","session_youtube_ff_link":"https://youtu.be/PTsFxQUWvIE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/M8JHVCnERRk&t=0h49m0s","sessions":["Time and Sequences"],"time_stamp":"2024-10-16T15:03:00Z","title":"A Comparative Study on Fixed-order Event Sequence Visualizations: Gantt, Extended Gantt, and Stringline Charts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243364388","abstract":"Seasonal-trend decomposition based on loess (STL) is a powerful tool to explore time series data visually. In this paper, we present an extension of STL to uncertain data, named uncertainty-aware STL (UASTL). Our method propagates multivariate Gaussian distributions mathematically exactly through the entire analysis and visualization pipeline. Thereby, stochastic quantities shared between the components of the decomposition are preserved. Moreover, we present application scenarios with uncertainty modeling based on Gaussian processes, e.g., data with uncertain areas or missing values. Besides these mathematical results and modeling aspects, we introduce visualization techniques that address the challenges of uncertainty visualization and the problem of visualizing highly correlated components of a decomposition. The global uncertainty propagation enables the time series visualization with STL-consistent samples, the exploration of correlation between and within decomposition's components, and the analysis of the impact of varying uncertainty. Finally, we show the usefulness of UASTL and the importance of uncertainty visualization with several examples. Thereby, a comparison with conventional STL is performed.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Tim Krake"},{"affiliations":"","email":"","is_corresponding":false,"name":"Daniel Kl\u00f6tzl"},{"affiliations":"","email":"","is_corresponding":false,"name":"David H\u00e4gele"},{"affiliations":"","email":"","is_corresponding":false,"name":"Daniel Weiskopf"}],"award":"","doi":"10.1109/TVCG.2024.3364388","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243364388","image_caption":"Seasonal-trend decomposition based on loess (STL) is used to visually explore time series. 
Our extension to uncertain data (UASTL) propagates uncertainty mathematically exactly through the entire analysis and visualization pipeline. Thereby, stochastic quantities shared between the components of the decomposition are preserved. Moreover, application scenarios with uncertainty modeling are presented and visualization techniques are introduced that address the challenges of uncertainty visualization and the problem of visualizing highly correlated components of a decomposition. The global uncertainty propagation enables the exploration of correlation and a sensitivity analysis to study the impact of varying uncertainty.","keywords":["- I.6.9.g Visualization techniques and methodologies < I.6.9 Visualization < I.6 Simulation, Modeling, and Visualization < I Compu - G.3 Probability and Statistics < G Mathematics of Computing - G.3.n Statistical computing < G.3 Probability and Statistics < G Mathematics of Computing - G.3.p Stochastic processes < G.3 Probability and Statistics < G Mathematics of Computing"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/M8JHVCnERRk&t=0h12m56s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243364388/v-tvcg-20243364388_Preview.mp4?token=BCg7deigEVGTPFbGM0Hp_7U9pRNUNevSKYv9dhp_GWY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243364388/v-tvcg-20243364388_Preview.srt?token=fI63P6FvvRjgqvHg18msLSIGv-lLiAEzbr-KmGlhudw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full22","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Time and Sequences","session_uid":"v-tvcg","session_youtube_ff_id":"PMo1LcjeZFY","session_youtube_ff_link":"https://youtu.be/PMo1LcjeZFY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/M8JHVCnERRk&t=0h12m56s","sessions":["Time and Sequences"],"time_stamp":"2024-10-16T14:27:00Z","title":"Uncertainty-Aware Seasonal-Trend Decomposition Based on Loess","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243376406","abstract":"Visualizing event timelines for collaborative text writing is an important application for navigating and understanding such data, as time passes and the size and complexity of both text and timeline increase. They are often employed by applications such as code repositories and collaborative text editors. In this paper, we present a visualization tool to explore historical records of writing of legislative texts, which were discussed and voted on by an assembly of representatives. Our visualization focuses on event timelines from text documents that involve multiple people and different topics, allowing for observation of different proposed versions of said text or tracking data provenance of given text sections, while highlighting the connections between all elements involved. We also describe the process of designing such a tool alongside domain experts, with three steps of evaluation being conducted to verify the effectiveness of our design.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Gabriel D. 
Cantareira"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yiwen Xing"},{"affiliations":"","email":"","is_corresponding":false,"name":"Nicholas Cole"},{"affiliations":"","email":"","is_corresponding":true,"name":"Rita Borgo"},{"affiliations":"","email":"","is_corresponding":false,"name":"Alfie Abdul-Rahman"}],"award":"","doi":"10.1109/TVCG.2024.3376406","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243376406","image_caption":"This picture presents multiple views of the timeline of a historical document, showing multiple versions interacting over time (top) and a detailed breakdown of a version with selectable components (bottom).","keywords":["Data visualization, Collaboration, History, Humanities, Writing, Navigation, Metadata"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/M8JHVCnERRk&t=0h55m53s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243376406/v-tvcg-20243376406_Preview.mp4?token=OjcNXImlbRw700hwSXy06KGQ6tEBUWc_a5ltQnoFgr4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243376406/v-tvcg-20243376406_Preview.srt?token=6nuoKdWt-oYrUTUkWIZAL1xCDEwD-SD_I1GSVfblXfQ&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full22","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Time and Sequences","session_uid":"v-tvcg","session_youtube_ff_id":"qp4KUQLtxbM","session_youtube_ff_link":"https://youtu.be/qp4KUQLtxbM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/M8JHVCnERRk&t=0h55m53s","sessions":["Time and Sequences"],"time_stamp":"2024-10-16T15:15:00Z","title":"Interactive Hierarchical Timeline for Collaborative Text Negotiation in Historical Records","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243382760","abstract":"Time-stamped event sequences (TSEQs) are time-oriented data without value information, shifting the focus of users to the exploration of temporal event occurrences. TSEQs exist in application domains, such as sleeping behavior, earthquake aftershocks, and stock market crashes. Domain experts face four challenges, for which they could use interactive and visual data analysis methods. First, TSEQs can be large with respect to both the number of sequences and events, often leading to millions of events. Second, domain experts need validated metrics and features to identify interesting patterns. Third, after identifying interesting patterns, domain experts contextualize the patterns to foster sensemaking. Finally, domain experts seek to reduce data complexity by data simplification and machine learning support. We present IVESA, a visual analytics approach for TSEQs. It supports the analysis of TSEQs at the granularities of sequences and events, supported with metrics and feature analysis tools. IVESA has multiple linked views that support overview, sort+filter, comparison, details-on-demand, and metadata relation-seeking tasks, as well as data simplification through feature analysis, interactive clustering, filtering, and motif detection and simplification. 
We evaluated IVESA with three case studies and a user study with six domain experts working with six different datasets and applications. Results demonstrate the usability and generalizability of IVESA across applications and cases that had up to 1,000,000 events.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"J\u00fcrgen Bernard"},{"affiliations":"","email":"","is_corresponding":false,"name":"Clara-Maria Barth"},{"affiliations":"","email":"","is_corresponding":false,"name":"Eduard Cuba"},{"affiliations":"","email":"","is_corresponding":false,"name":"Andrea Meier"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yasara Peiris"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ben Shneiderman"}],"award":"","doi":"10.1109/TVCG.2024.3382760","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243382760","image_caption":"Overview of IVESA. On the left, the Sequence Overview and Details View primarily enable the analysis of the TSEQs content, i.e., events, event sequences, groups of event sequences, motifs, and features. On the right, the Metadata View supports the analysis of metadata attributes and the TSEQs contextualization, whereas the Summary View includes the entry point to auxiliary views for filtering, motif configuration, feature analysis, and clustering.","keywords":["Time-Stamped Event Sequences, Time-Oriented Data, Visual Analytics, Data-First Design Study, Iterative Design, Visual Interfaces, User Evaluation"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/M8JHVCnERRk&t=0h37m42s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243382760/v-tvcg-20243382760_Preview.mp4?token=j-q4DH0nD7WReNPJBJp9K2SNPr_8QxawbxwqV-8QMpA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243382760/v-tvcg-20243382760_Preview.srt?token=azEobpUAVOriiRVFx4PMGdKRcO5j2ztPWCWVuKbLIOE&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full22","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Time and Sequences","session_uid":"v-tvcg","session_youtube_ff_id":"7ffZxu1Nkgo","session_youtube_ff_link":"https://youtu.be/7ffZxu1Nkgo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/M8JHVCnERRk&t=0h37m42s","sessions":["Time and Sequences"],"time_stamp":"2024-10-16T14:51:00Z","title":"Visual Analysis of Time-Stamped Event Sequences","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1137","abstract":"Abstract\u2014Inspired by recent advances in digital fabrication, artists and scientists have demonstrated that physical data encodings (i.e., data physicalizations) can increase engagement with data, foster collaboration, and in some cases, improve data legibility and analysis relative to digital alternatives. However, prior empirical studies have only investigated abstract data encoded in physical form (e.g., laser cut bar charts) and not continuously sampled spatial data fields relevant to climate and medical science (e.g., heights, temperatures, densities, and velocities sampled on a spatial grid). 
This paper presents the design and results of the first study to characterize human performance in 3D spatial data analysis tasks across analogous physical and digital visualizations. Participants analyzed continuous spatial elevation data with three visualization modalities: (1) 2D digital visualization; (2) perspective-tracked, stereoscopic \"fishtank\" virtual reality; and (3) 3D printed data physicalization. Their tasks included tracing paths downhill, looking up spatial locations and comparing their relative heights, and identifying and reporting the minimum and maximum heights within certain spatial regions. As hypothesized, in most cases, participants performed the tasks just as well or better in the physical modality (based on time and error metrics). Additional results include an analysis of open-ended feedback from participants and discussion of implications for further research on the value of data physicalization. All data and supplemental materials are available at https://osf.io/7xdq4/.","accessible_pdf":true,"authors":[{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"bridger.g.herman@gmail.com","is_corresponding":true,"name":"Bridger Herman"},{"affiliations":["Beth Israel Deaconess Medical Center, Boston, United States"],"email":"cdjackso@bidmc.harvard.edu","is_corresponding":false,"name":"Cullen D. Jackson"},{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"dfk@umn.edu","is_corresponding":false,"name":"Daniel F. Keefe"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1137","image_caption":"Data physicalizations provide many potential benefits over digital data displays, including haptic perception and body-centric judgments. 
This paper compares the effectiveness of physicalizations (left) with virtual reality (right top) and 2D visualizations (right bottom) for spatial data analysis tasks on digital elevation data common in climate science and natural resource management.","keywords":["Data physicalization, virtual reality, evaluation."],"open_access_supplemental_link":"https://osf.io/7xdq4/","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/z4s9d","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/sO33xoUQ9fk&t=0h37m14s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1137/v-full-1137_Preview.mp4?token=MHGa74Psew6hZ1UpP2QgZ6SX6fMiKuMe3Ls2xjCriw0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1137/v-full-1137_Preview.srt?token=2ogKNpPzr0iJqMqzl-UOMt0D9wnbWfzqVxHbCvsqisU&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full23","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Accessibility and Touch","session_uid":"v-full","session_youtube_ff_id":"84IvcxzBg7U","session_youtube_ff_link":"https://youtu.be/84IvcxzBg7U","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/sO33xoUQ9fk&t=0h37m14s","sessions":["Accessibility and Touch"],"time_stamp":"2024-10-17T18:21:00Z","title":"Touching the Ground: Evaluating the Effectiveness of Data Physicalizations for Spatial Data Analysis Tasks","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1500","abstract":"Haptic feedback provides an essential sensory stimulus crucial for interaction and analyzing three-dimensional spatio-temporal phenomena on surface visualizations. Given its ability to provide enhanced spatial perception and scene maneuverability, virtual reality (VR) catalyzes haptic interactions on surface visualizations. Various interaction modes, encompassing both mid-air and on-surface interactions---with or without the application of assisting force stimuli---have been explored using haptic force feedback devices. In this paper, we evaluate the use of on-surface and assisted on-surface haptic modes of interaction compared to a no-haptic interaction mode. A force-based haptic stylus is used for all three modalities; the on-surface mode uses collision based forces, whereas the assisted on-surface mode is accompanied by an additional snapping force. We conducted a within-subjects user study involving fundamental interaction tasks performed on surface visualizations. Keeping a consistent visual design across all three modes, our study incorporates tasks that require the localization of the highest, lowest, and random points on surfaces; and tasks that focus on brushing curves on surfaces with varying complexity and occlusion levels. Our findings show that participants took almost the same time to brush curves using all the interaction modes. They could draw smoother curves using the on-surface interaction modes compared to the no-haptic mode. However, the assisted on-surface mode provided better accuracy than the on-surface mode. The on-surface mode was slower in point localization, but the accuracy depended on the visual cues and occlusions associated with the tasks. 
Finally, we discuss participant feedback on using haptic force feedback as a tangible input modality and share takeaways to aid the design of haptics-based tangible interactions for surface visualizations. ","accessible_pdf":false,"authors":[{"affiliations":["University of Calgary, Calgary, Canada"],"email":"hamza.afzaal@ucalgary.ca","is_corresponding":true,"name":"Hamza Afzaal"},{"affiliations":["University of Calgary, Calgary, Canada"],"email":"ualim@ucalgary.ca","is_corresponding":false,"name":"Usman Alim"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1500","image_caption":"The figure shows how a force-based haptic stylus (middle-top) is used to interact with 3D surface visualizations. A virtual stylus (left) is used to interact with the surface, with an assistive force (middle-bottom) that activates when the stylus enters \"snap zone\" (S) above the surface (M). The forces in snap zone are calculated using a combination of spring and snapping forces. The paths traced by participants (right) illustrate how the stylus aligns with the surface geometry, guided by these snapping forces, while the surface texture and the Laplacian of the distance transform emphasize the smoothness and accuracy of the paths.","keywords":["Scalar Field Data, Guidelines, Interaction Design, Human-Subjects Quantitative Studies, Domain Agnostic, Isosurface Techniques, Computer Graphics Techniques, AR/VR/Immersive, Specialized Input/Display Hardware"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://doi.org/10.48550/arXiv.2408.04031","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/sO33xoUQ9fk&t=0h48m44s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1500/v-full-1500_Preview.mp4?token=jgv8mXTzZajUyl2IUm_dK6-zrdp-umkLuenVdRWoJN0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1500/v-full-1500_Preview.srt?token=7SpPLecKhHGy023qP07DEm035Bsvx364jPfUa-tXgeI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full23","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Accessibility and Touch","session_uid":"v-full","session_youtube_ff_id":"jJowp-dAYp8","session_youtube_ff_link":"https://youtu.be/jJowp-dAYp8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/sO33xoUQ9fk&t=0h48m44s","sessions":["Accessibility and Touch"],"time_stamp":"2024-10-17T18:33:00Z","title":"Evaluating Force-based Haptics for Immersive Tangible Interactions with Surface Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1522","abstract":"Despite the recent surge of research efforts to make data visualizations accessible to people who are blind or have low vision (BLV), how to support BLV people's data analysis remains an important and challenging question. As refreshable tactile displays (RTDs) become cheaper and conversational agents continue to improve, their combination provides a promising approach to support BLV people's interactive data exploration and analysis. 
To understand how BLV people would use and react to a system combining an RTD with a conversational agent, we conducted a Wizard-of-Oz study with 11 BLV participants, where they interacted with line charts, bar charts, and isarithmic maps. Our analysis of participants' interactions led to the identification of nine distinct patterns. We also learned that the choice of modalities depended on the type of task and prior experience with tactile graphics, and that participants strongly preferred the combination of RTD and speech to a single modality. In addition, participants with more tactile experience described how tactile images facilitated a deeper engagement with the data and supported independent interpretation. Our findings will inform the design of interfaces for such interactive mixed-modality systems.","accessible_pdf":true,"authors":[{"affiliations":["Monash University, Melbourne, Australia"],"email":"samuel.reinders@monash.edu","is_corresponding":false,"name":"Samuel Reinders"},{"affiliations":["Monash University, Melbourne, Australia"],"email":"matthew.butler@monash.edu","is_corresponding":false,"name":"Matthew Butler"},{"affiliations":["Monash University, Clayton, Australia"],"email":"ingrid.zukerman@monash.edu","is_corresponding":false,"name":"Ingrid Zukerman"},{"affiliations":["Yonsei University, Seoul, Korea, Republic of","Microsoft Research, Redmond, United States"],"email":"b.lee@yonsei.ac.kr","is_corresponding":false,"name":"Bongshin Lee"},{"affiliations":["Monash University, Melbourne, Australia"],"email":"lizhen.qu@monash.edu","is_corresponding":false,"name":"Lizhen Qu"},{"affiliations":["Monash University, Melbourne, Australia"],"email":"kim.marriott@monash.edu","is_corresponding":true,"name":"Kim Marriott"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1522","image_caption":"We explored how refreshable tactile displays (RTDs) can be combined with conversational agents to assist people who are blind or have low vision (BLV) in undertaking data analysis activities. We used a Wizard-of-Oz method, allowing participants to manipulate charts rendered on the RTD, perform touch gestures, and ask the conversational agent questions to aid their understanding. Pictured is an RTD with a stacked bar chart rendered on the screen. A user is reaching out with both hands, touching raised pins on the RTD that make up the different components of the bar chart. 
","keywords":["Accessible data visualization, refreshable tactile displays, conversational agents, interactive data exploration, Wizard of Oz study, people who are blind or have low vision"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.04806","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/sO33xoUQ9fk&t=0h25m24s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1522/v-full-1522_Preview.mp4?token=SEEy97d0dqlKpk3cT4vIQZNToqbJOTSBwqAygXATai4&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full23","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Accessibility and Touch","session_uid":"v-full","session_youtube_ff_id":"Xw469H8JWP4","session_youtube_ff_link":"https://youtu.be/Xw469H8JWP4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/sO33xoUQ9fk&t=0h25m24s","sessions":["Accessibility and Touch"],"time_stamp":"2024-10-17T18:09:00Z","title":"When Refreshable Tactile Displays Meet Conversational Agents: Investigating Accessible Data Presentation and Analysis with Touch and Speech","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1626","abstract":"We propose and study a novel cross-reality environment that seamlessly integrates a monoscopic 2D surface (an interactive screen with touch and pen input) with a stereoscopic 3D space (an augmented reality HMD) to jointly host spatial data visualizations. This innovative approach combines the best of two conventional methods of displaying and manipulating spatial 3D data, enabling users to fluidly explore diverse visual forms using tailored interaction techniques. Providing such effective 3D data exploration techniques is pivotal for conveying its intricate spatial structures---often at multiple spatial or semantic scales---across various application domains and requiring diverse visual representations for effective visualization. To understand user reactions to our new environment, we began with an elicitation user study, in which we captured their responses and interactions. We observed that users adapted their interaction approaches based on perceived visual representations, with natural transitions in spatial awareness and actions while navigating across the physical surface. Our findings then informed the development of a design space for spatial data exploration in cross-reality. We thus developed cross-reality environments tailored to three distinct domains: for 3D molecular structure data, for 3D point cloud data, and for 3D anatomical data. In particular, we designed interaction techniques that account for the inherent features of interactions in both spaces, facilitating various forms of interaction, including mid-air gestures, touch interactions, pen interactions, and combinations thereof, to enhance the users' sense of presence and engagement. We assessed the usability of our environment with biologists, focusing on its use for domain research. In addition, we evaluated our interaction transition designs with virtual and mixed-reality experts to gather further insights. 
As a result, we provide our design suggestions for the cross-reality environment, emphasizing the interaction with diverse visual representations and seamless interaction transitions between 2D and 3D spaces.","accessible_pdf":false,"authors":[{"affiliations":["Xi'an Jiaotong-Liverpool University, Suzhou, China"],"email":"lixiang.zhao17@student.xjtlu.edu.cn","is_corresponding":true,"name":"Lixiang Zhao"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"tobias.isenberg@gmail.com","is_corresponding":false,"name":"Tobias Isenberg"},{"affiliations":["Xi'an Jiaotong-Liverpool University, Suzhou, China"],"email":"fuqi.xie20@student.xjtlu.edu.cn","is_corresponding":false,"name":"Fuqi Xie"},{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"],"email":"hainingliang@hkust-gz.edu.cn","is_corresponding":false,"name":"Hai-Ning Liang"},{"affiliations":["Xi'an Jiaotong-Liverpool University, Suzhou, China"],"email":"lingyun.yu@xjtlu.edu.cn","is_corresponding":false,"name":"Lingyun Yu"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1626","image_caption":"SpatialTouch is a novel cross-reality environment that seamlessly integrates a monoscopic 2D surface (an interactive screen with touch and pen input) with a stereoscopic 3D space (an augmented reality HMD) to jointly host spatial data visualizations. This innovative approach combines the best of two conventional methods of displaying and manipulating spatial 3D data, enabling users to fluidly explore diverse visual forms using tailored interaction techniques. Providing such effective 3D data exploration techniques is pivotal for conveying its intricate spatial structures---often at multiple spatial or semantic scales---across various application domains and requiring diverse visual representations for effective visualization.","keywords":["Spatial data, immersive visualization, cross reality, interaction techniques"],"open_access_supplemental_link":"https://osf.io/avxr9","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.14833","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/sO33xoUQ9fk&t=0h59m39s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1626/v-full-1626_Preview.mp4?token=sK3xgzYvz9vy3e12YjM6EeAlIsksBkwUQRHAkZSg-e0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1626/v-full-1626_Preview.srt?token=nNi8GNmk5EYUK_KG5cb0-5UOBQf0CamZesLxXXKI1Zo&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full23","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Accessibility and Touch","session_uid":"v-full","session_youtube_ff_id":"C-F1zT-UgsE","session_youtube_ff_link":"https://youtu.be/C-F1zT-UgsE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/sO33xoUQ9fk&t=0h59m39s","sessions":["Accessibility and Touch"],"time_stamp":"2024-10-17T18:45:00Z","title":"SpatialTouch: Exploring Spatial Data Visualizations in Cross-reality","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1917","abstract":"The importance of data charts is self-evident, given their ability to 
express complex data in a simple format that facilitates quick and easy comparisons, analysis, and consumption. However, the inherent visual nature of the charts creates barriers for people with visual impairments to reap the associated benefits to the same extent as their sighted peers. While extant research has predominantly focused on understanding and addressing these barriers for blind screen reader users, the needs of low-vision screen magnifier users have been largely overlooked. In an interview study, almost all low-vision participants stated that it was challenging to interact with data charts on small screen devices such as smartphones and tablets, even though they could technically \u201csee\u201d the chart content. They ascribed these challenges mainly to the magnification induced loss of visual context that connected data points with each other and also with chart annotations, e.g., axis values. In this paper, we present a method that addresses this problem by automatically transforming charts that are typically non-interactive images into personalizable interactive charts which allow selective viewing of desired data points and preserve visual context as much as possible under screen enlargement. We evaluated our method in a usability study with 26 low-vision participants, who all performed a set of representative chart-related tasks under different study conditions. In the study, we observed that our method significantly improved the usability of charts over both the status quo screen magnifier and a state-of-the-art space compaction-based solution. ","accessible_pdf":true,"authors":[{"affiliations":["Old Dominion University, Norfolk, United States"],"email":"yprak001@odu.edu","is_corresponding":true,"name":"Yash Prakash"},{"affiliations":["Old Dominion University, Norfolk, United States"],"email":"pkhan002@odu.edu","is_corresponding":false,"name":"Pathan Aseef Khan"},{"affiliations":["Old Dominion University, Norfolk, United States"],"email":"anaya001@odu.edu","is_corresponding":false,"name":"Akshay Kolgar Nayak"},{"affiliations":["Old Dominion University, Norfolk, United States"],"email":"uksjayarathna@gmail.com","is_corresponding":false,"name":"Sampath Jayarathna"},{"affiliations":["Michigan State University, East Lansing, United States"],"email":"leehaena@msu.edu","is_corresponding":false,"name":"Hae-Na Lee"},{"affiliations":["Old Dominion University, Norfolk, United States"],"email":"vganjigu@odu.edu","is_corresponding":false,"name":"Vikas Ashok"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1917","image_caption":"This figure illustrates the user journey for GraphLite, highlighting how low-vision users enhance data visualization on smartphones. The journey begins with users swiping up to access a theme picker, adjusting visual elements like contrast, colors, and font size (a). Next, they use a customization menu to filter and view specific data points, navigating options with the \"Next\" button and finalizing with \"Done,\" while also using a slide gesture to navigate selections (b). 
Finally, users personalize the visualization by adjusting bar colors, improving data interpretation and accessibility (c).","keywords":["Low vision, Graph usability, Screen magnifer, Graph perception, Accessibility"],"open_access_supplemental_link":"https://github.com/accessodu/GraphLite.git","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/sO33xoUQ9fk&t=0h13m7s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1917/v-full-1917_Preview.mp4?token=MBZ8ifJtfXtnMfRB7H2bLh5bT5dInwre0HDdcTyarSE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1917/v-full-1917_Preview.srt?token=KFhnAMhnRNglx-KY1SH0jQikfdNIWo4oj6SEmyyqAZg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full23","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Accessibility and Touch","session_uid":"v-full","session_youtube_ff_id":"2R4conY9Pfw","session_youtube_ff_link":"https://youtu.be/2R4conY9Pfw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/sO33xoUQ9fk&t=0h13m7s","sessions":["Accessibility and Touch"],"time_stamp":"2024-10-17T17:57:00Z","title":"Towards Enhancing Low Vision Usability of Data Charts on Smartphones","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243356566","abstract":"The increasing ubiquity of data in everyday life has elevated the importance of data literacy and accessible data representations, particularly for individuals with disabilities. While prior research predominantly focuses on the needs of the visually impaired, our survey aims to broaden this scope by investigating accessible data representations across a more inclusive spectrum of disabilities. After conducting a systematic review of 152 accessible data representation papers from ACM and IEEE databases, we found that roughly 78% of existing articles center on vision impairments. In this paper, we conduct a comprehensive review of the remaining 22% of papers focused on underrepresented disability communities. We developed categorical dimensions based on accessibility, visualization, and human-computer interaction to classify the papers. These dimensions include the community of focus, issues addressed, contribution type, study methods, participants, data type, visualization type, and data domain. Our work redefines accessible data representations by illustrating their application for disabilities beyond those related to vision. Building on our literature review, we identify and discuss opportunities for future research in accessible data representations. All supplemental materials are available at https://osf.io/ yv4xm/?view only=7b36a3fbf7a14b3888029966faa3def9.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Brianna L. Wimer"},{"affiliations":"","email":"","is_corresponding":false,"name":"Laura South"},{"affiliations":"","email":"","is_corresponding":false,"name":"Keke Wu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Danielle Albers Szafir"},{"affiliations":"","email":"","is_corresponding":false,"name":"Michelle A. Borkin"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ronald A. 
Metoyer"}],"award":"","doi":"10.1109/TVCG.2024.3356566","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243356566","image_caption":"Survey of 152 papers on accessible data visualizations showing 78% focus on visual disabilities while 22% cover other disabilities.","keywords":["Accessibility, Data Representations."],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/6prxd","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/sO33xoUQ9fk&t=0h0m49s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243356566/v-tvcg-20243356566_Preview.mp4?token=F3jud3aiWaCnh5W-VS696mwebVM5hw-8x6FoAVqtDh4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243356566/v-tvcg-20243356566_Preview.srt?token=s5KpUa07Knc-IjMG-14pViBZCHwARKKaCCSB_aHf3uE&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full23","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Accessibility and Touch","session_uid":"v-tvcg","session_youtube_ff_id":"Kh-u47UPXnU","session_youtube_ff_link":"https://youtu.be/Kh-u47UPXnU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/sO33xoUQ9fk&t=0h0m49s","sessions":["Accessibility and Touch"],"time_stamp":"2024-10-17T17:45:00Z","title":"Beyond Vision Impairments: Redefining the Scope of Accessible Data Representations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1302","abstract":"We present the results of an exploratory study on how pairs interact with speech commands and touch gestures on a wall-sized display during a collaborative sensemaking task. Previous work has shown that speech commands, alone or in combination with other input modalities, can support visual data exploration by individuals. However, it is still unknown whether and how speech commands can be used in collaboration, and for what tasks. To answer these questions, we developed a functioning prototype that we used as a technology probe. We conducted an in-depth exploratory study with 10 participant pairs to analyze their interaction choices, the interplay between the input modalities, and their collaboration. While touch was the most used modality, we found that participants preferred speech commands for global operations, used them for distant interaction, and that speech interaction contributed to the awareness of the partner\u2019s actions. Furthermore, the likelihood of using speech commands during collaboration was related to the personality trait of agreeableness. Regarding collaboration styles, participants interacted with speech equally often whether they were in loosely or closely coupled collaboration. While the partners stood closer to each other during close collaboration, they did not distance themselves to use speech commands. From our findings, we derive and contribute a set of design considerations for collaborative and multimodal interactive data analysis systems. 
All supplemental materials are available at https://osf.io/8gpv2.","accessible_pdf":true,"authors":[{"affiliations":["University of Bremen, Bremen, Germany","University of Bremen, Bremen, Germany"],"email":"molina@uni-bremen.de","is_corresponding":true,"name":"Gabriela Molina Le\u00f3n"},{"affiliations":["LISN, Universit\u00e9 Paris-Saclay, CNRS, INRIA, Orsay, France"],"email":"anastasia.bezerianos@universite-paris-saclay.fr","is_corresponding":false,"name":"Anastasia Bezerianos"},{"affiliations":["Inria, Palaiseau, France"],"email":"olivier.gladin@inria.fr","is_corresponding":false,"name":"Olivier Gladin"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"petra.isenberg@inria.fr","is_corresponding":false,"name":"Petra Isenberg"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1302","image_caption":"Two people standing in front of the wall display; one person is moving a group of selected documents by dragging a stack of them with the index finger while the other one observes.","keywords":["Speech interaction, wall display, collaborative sensemaking, multimodal interaction, collaboration styles"],"open_access_supplemental_link":"https://osf.io/8gpv2/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.03813","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/w1Tud4nOruI&t=1h3m4s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1302/v-full-1302_Preview.mp4?token=X7Rtn5_FSND2OZ7n5tcNS3dGX4peJ_l1b467_T1gcoo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1302/v-full-1302_Preview.srt?token=g_tsHKQ3ALZ2lV4XhlpYWC82NHAFsoAG6MIV-xB4iu0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full24","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Collaboration and Communication","session_uid":"v-full","session_youtube_ff_id":"-xq224J5umc","session_youtube_ff_link":"https://youtu.be/-xq224J5umc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/w1Tud4nOruI&t=1h3m4s","sessions":["Collaboration and Communication"],"time_stamp":"2024-10-16T17:00:00Z","title":"Talk to the Wall: The Role of Speech Interaction in Collaborative Visual Analytics","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1329","abstract":"The integration of Large Language Models (LLMs), especially ChatGPT, into education is poised to revolutionize students\u2019 learning experiences by introducing innovative conversational learning methodologies. To empower students to fully leverage the capabilities of ChatGPT in educational scenarios, understanding students\u2019 interaction patterns with ChatGPT is crucial for instructors. However, this endeavor is challenging due to the absence of datasets focused on student-ChatGPT conversations and the complexities in identifying and analyzing the evolutional interaction patterns within conversations. To address these challenges, we collected conversational data from 48 students interacting with ChatGPT in a master\u2019s level data visualization course over one semester. 
We then developed a coding scheme, grounded in the literature on cognitive levels and thematic analysis, to categorize students\u2019 interaction patterns with ChatGPT. Furthermore, we present a visual analytics system, StuGPTViz, that tracks and compares temporal patterns in student prompts and the quality of ChatGPT\u2019s responses at multiple scales, revealing significant pedagogical insights for instructors. We validated the system\u2019s effectiveness through expert interviews with six data visualization instructors and three case studies. The results confirmed StuGPTViz\u2019s capacity to enhance educators\u2019 insights into the pedagogical value of ChatGPT. We also discussed the potential research opportunities of applying visual analytics in education and developing AI-driven personalized learning solutions.","accessible_pdf":true,"authors":[{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"zchendf@connect.ust.hk","is_corresponding":true,"name":"Zixin Chen"},{"affiliations":["The Hong Kong University of Science and Technology, Sai Kung, China"],"email":"wangjiachen@zju.edu.cn","is_corresponding":false,"name":"Jiachen Wang"},{"affiliations":["Texas A","M University, College Station, United States"],"email":"xiameng9355@gmail.com","is_corresponding":false,"name":"Meng Xia"},{"affiliations":["The Hong Kong University of Science and Technology, Kowloon, Hong Kong"],"email":"kshigyo@connect.ust.hk","is_corresponding":false,"name":"Kento Shigyo"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"dliuak@connect.ust.hk","is_corresponding":false,"name":"Dingdong Liu"},{"affiliations":["Hong Kong University of Science and Technology, Hong Kong, Hong Kong"],"email":"rzhangab@connect.ust.hk","is_corresponding":false,"name":"Rong Zhang"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"huamin@cse.ust.hk","is_corresponding":false,"name":"Huamin Qu"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1329","image_caption":"We developed StuGPTViz, a visual analytics system designed to analyze and compare student interactions with ChatGPT in a master's-level data visualization course. By categorizing prompts and responses using a coding scheme grounded in literature on cognitive levels and thematic analysis, the system reveals key patterns and insights. 
Validated through expert interviews and case studies, StuGPTViz enhances educators' understanding of ChatGPT's pedagogical value, demonstrating the potential of visual analytics to drive AI-driven personalized learning and improve educational outcomes.","keywords":["Visual analytics for education, ChatGPT for education, student-ChatGPT interaction"],"open_access_supplemental_link":"https://github.com/CinderD/StuGPTViz_Supplemental","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.12423","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/w1Tud4nOruI&t=0h0m48s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1329/v-full-1329_Preview.mp4?token=3rbgGFvDtMShBC9lwJqoPby4KLpDPaQSfRuudFGzbmo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1329/v-full-1329_Preview.srt?token=Vu9ILrtMjr886gOJFjLuEkfBzp_gBW8c7A3hd4MdWxQ&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full24","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Collaboration and Communication","session_uid":"v-full","session_youtube_ff_id":"r4bxhQuXqIM","session_youtube_ff_link":"https://youtu.be/r4bxhQuXqIM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/w1Tud4nOruI&t=0h0m48s","sessions":["Collaboration and Communication"],"time_stamp":"2024-10-16T16:00:00Z","title":"StuGPTViz: A Visual Analytics Approach to Understand Student-ChatGPT Interactions","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1368","abstract":"Synthetic Lethal (SL) relationships, though rare among the vast array of gene combinations, hold substantial promise for targeted cancer therapy. Despite advancements in AI model accuracy, there is still a significant need among domain experts for interpretive paths and mechanism explorations that align better with domain-specific knowledge, particularly due to the high costs of experimentation. To address this gap, we propose an iterative Human-AI collaborative framework with two key components: 1) Human-Engaged Knowledge Graph Refinement based on Metapath Strategies, which leverages insights from interpretive paths and domain expertise to refine the knowledge graph through metapath strategies with appropriate granularity. 2) Cross-Granularity SL Interpretation Enhancement and Mechanism Analysis, which aids experts in organizing and comparing predictions and interpretive paths across different granularities, uncovering new SL relationships, enhancing result interpretation, and elucidating potential mechanisms inferred by Graph Neural Network (GNN) models. These components cyclically optimize model predictions and mechanism explorations, enhancing expert involvement and intervention to build trust. Facilitated by SLInterpreter, this framework ensures that newly generated interpretive paths increasingly align with domain knowledge and adhere more closely to real-world biological principles through iterative Human-AI collaboration. 
We evaluate the framework\u2019s efficacy through a case study and expert interviews.","accessible_pdf":false,"authors":[{"affiliations":["Shanghaitech University, Shanghai, China"],"email":"jianghr2023@shanghaitech.edu.cn","is_corresponding":true,"name":"Haoran Jiang"},{"affiliations":["ShanghaiTech University, Shanghai, China"],"email":"shishh2023@shanghaitech.edu.cn","is_corresponding":false,"name":"Shaohan Shi"},{"affiliations":["ShanghaiTech University, Shanghai, China"],"email":"zhangshh2@shanghaitech.edu.cn","is_corresponding":false,"name":"Shuhao Zhang"},{"affiliations":["ShanghaiTech University, Shanghai, China"],"email":"zhengjie@shanghaitech.edu.cn","is_corresponding":false,"name":"Jie Zheng"},{"affiliations":["ShanghaiTech University, Shanghai, China"],"email":"liquan@shanghaitech.edu.cn","is_corresponding":false,"name":"Quan Li"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1368","image_caption":"SLInterpreter, based on an iterative Human-AI collaboration framework, aims at 1) Human-Engaged Knowledge Graph Refinement based on Metapath Strategies and 2) Cross-Granularity SL Interpretation Enhancement and Mechanism Analysis for domain experts. Domain experts explore new SL pairs using interpretive paths generated by a model trained on the entire data. Irrelevant or incorrect paths that may introduce noise are eliminated from the KG using appropriate metapath strategies. Subsequently, the model retrains, allowing domain experts to iteratively scrutinize predictions and interpretive paths, refining the KG. This iterative process optimizes predictions and mechanism exploration, enhancing expert participation and intervention, leading to increased trust. ","keywords":["Synthetic Lethality, Model Interpretability, Visual Analytics, Iterative Human-AI Collaboration."],"open_access_supplemental_link":"https://github.com/jianghr-shanghaitech/SLInterpreter-Demo","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.14770","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/w1Tud4nOruI&t=0h14m15s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1368/v-full-1368_Preview.mp4?token=2pjLZ8kfmwZTY5NRT1gcmw7TYjbBieYXXm59fK4BgU0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1368/v-full-1368_Preview.srt?token=m4Eev96u53ePZ8jNtMBDWEOvYZGzxUCUmnR8PHCT350&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full24","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Collaboration and Communication","session_uid":"v-full","session_youtube_ff_id":"eaCCRPrMxk8","session_youtube_ff_link":"https://youtu.be/eaCCRPrMxk8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/w1Tud4nOruI&t=0h14m15s","sessions":["Collaboration and Communication"],"time_stamp":"2024-10-16T16:12:00Z","title":"SLInterpreter: An Exploratory and Iterative Human-AI Collaborative System for GNN-based Synthetic Lethal Prediction","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1487","abstract":"Referential gestures, or as termed in linguistics, deixis, are an essential part of communication around data visualizations. 
Despite their importance, such gestures are often overlooked when documenting data analysis meetings. Transcripts, for instance, fail to capture gestures, and video recordings may not adequately capture or emphasize them. We introduce a novel method for documenting collaborative data meetings that treats deixis as a first-class citizen. Our proposed framework captures cursor-based gestural data along with audio and converts them into interactive documents. The framework leverages a large language model to identify word correspondences with gestures. These identified references are used to create context-based annotations in the resulting interactive document. We assess the effectiveness of our proposed method through a user study, finding that participants preferred our automated interactive documentation over recordings, transcripts, and manual note-taking. Furthermore, we derive a preliminary taxonomy of cursor-based deictic gestures from participant actions during the study. This taxonomy offers further opportunities for better utilizing cursor-based deixis in collaborative data analysis scenarios.","accessible_pdf":true,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"hatch.on27@gmail.com","is_corresponding":true,"name":"Chang Han"},{"affiliations":["The University of Utah, Salt Lake City, United States"],"email":"kisaacs@sci.utah.edu","is_corresponding":false,"name":"Katherine E. Isaacs"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1487","image_caption":"An overview of the interactive notes, with: (A) Interactive text, comprising transcripts from audio and the LLM-generated meeting minutes, includes interactive text components based on the results of utterance matching and reference extraction. (B) Visual media from the meetings are presented with annotations based on parameters transmitted by the interactive text on the left. This operation can change the underlying visualization, add annotations, and alter interactive states. 
","keywords":["Taxonomy, Models, Frameworks, Theory ; Collaboration ; Communication/Presentation, Storytelling"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.04041","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/w1Tud4nOruI&t=0h39m5s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1487/v-full-1487_Preview.mp4?token=FQT1xLvgPUr_7zSaXHM2CKNTj1_d2c5nF6amiRsmBtY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1487/v-full-1487_Preview.srt?token=GmbrGAoHSO2hRXYZfucWq6Ym-yzgi6syszwqk7mHthw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full24","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Collaboration and Communication","session_uid":"v-full","session_youtube_ff_id":"c8cC1W9ucj8","session_youtube_ff_link":"https://youtu.be/c8cC1W9ucj8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/w1Tud4nOruI&t=0h39m5s","sessions":["Collaboration and Communication"],"time_stamp":"2024-10-16T16:36:00Z","title":"A Deixis-Centered Approach for Documenting Remote Synchronous Communication around Data Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20223229017","abstract":"We present V-Mail, a framework of cross-platform applications, interactive techniques, and communication protocols for improved multi-person correspondence about spatial 3D datasets. Inspired by the daily use of e-mail, V-Mail seeks to enable a similar style of rapid, multi-person communication accessible on any device; however, it aims to do this in the new context of spatial 3D communication, where limited access to 3D graphics hardware typically prevents such communication. The approach integrates visual data storytelling with data exploration, spatial annotations, and animated transitions. V-Mail ``data stories'' are exported in a standard video file format to establish a common baseline level of access on (almost) any device. The V-Mail framework also includes a series of complementary client applications and plugins that enable different degrees of story co-authoring and data exploration, adjusted automatically to match the capabilities of various devices. A lightweight, phone-based V-Mail app makes it possible to annotate data by adding captions to the video. These spatial annotations are then immediately accessible to team members running high-end 3D graphics visualization systems that also include a V-Mail client, implemented as a plugin. Results and evaluation from applying V-Mail to assist communication within an interdisciplinary science team studying Antarctic ice sheets confirm the utility of the asynchronous, cross-platform collaborative framework while also highlighting some current limitations and opportunities for future work.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Jung Who Nam"},{"affiliations":"","email":"","is_corresponding":false,"name":"Tobias Isenberg"},{"affiliations":"","email":"","is_corresponding":true,"name":"Daniel F. 
Keefe"}],"award":"","doi":"10.1109/TVCG.2022.3229017","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20223229017","image_caption":"V-Mail is a framework of cross-platform applications, interactive techniques, and communication protocols for multi-person correspondence about spatial 3D datasets. It has three working platforms that demonstrate different storytelling fidelities of V-Mail: (bottom-left) anyone with a video player can at least passively view the story, including annotations made by others; (top-right) in the highest-fidelity case, the story unlocks data on a V-Mail server than can be loaded via a plugin for desktop-based visualization applications, where users can explore and annotate the 3D data more deeply; (bottom-right) the mobile client works as a custom video player with mechanisms for adding annotations. ","keywords":["Human-computer interaction, visualization of scientific 3D data, communication, storytelling, immersive analytics"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://hal.science/hal-03924707","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/w1Tud4nOruI&t=0h27m6s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20223229017/v-tvcg-20223229017_Preview.mp4?token=IxbiG0ErK3J9Z37IFK6q9x6nleACetMR6uzAo29MSk8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20223229017/v-tvcg-20223229017_Preview.srt?token=q9ZZ0nKol3LUeioElIKQC0rnZo08cDV3NmSNp9rilik&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full24","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Collaboration and Communication","session_uid":"v-tvcg","session_youtube_ff_id":"5sr7x3v9C1Y","session_youtube_ff_link":"https://youtu.be/5sr7x3v9C1Y","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/w1Tud4nOruI&t=0h27m6s","sessions":["Collaboration and Communication"],"time_stamp":"2024-10-16T16:24:00Z","title":"V-Mail: 3D-Enabled Correspondence about Spatial Data on (Almost) All Your Devices","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233323150","abstract":"We examined user preferences to combine multiple interaction modalities for collaborative interaction with data shown on large vertical displays. Large vertical displays facilitate visual data exploration and allow the use of diverse interaction modalities by multiple users at different distances from the screen. Yet, how to offer multiple interaction modalities is a non-trivial problem. We conducted an elicitation study with 20 participants that generated 1015 interaction proposals combining touch, speech, pen, and mid-air gestures. Given the opportunity to interact using these four 13 modalities, participants preferred speech interaction in 10 of 15 14 low-level tasks and direct manipulation for straightforward tasks 15 such as showing a tooltip or selecting. In contrast to previous work, 16 participants most favored unimodal and personal interactions. We 17 identified what we call collaborative synonyms among their interaction proposals and found that pairs of users collaborated either unimodally and simultaneously or multimodally and sequentially. 
We provide insights into how end-users associate visual exploration tasks with certain modalities and how they collaborate at different interaction distances using specific interaction modalities. The supplemental material is available at https://osf.io/m8zuh.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Gabriela Molina Le\u00f3n"},{"affiliations":"","email":"","is_corresponding":false,"name":"Petra Isenberg"},{"affiliations":"","email":"","is_corresponding":false,"name":"Andreas Breiter"}],"award":"","doi":"10.1109/TVCG.2023.3323150","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233323150","image_caption":"One person taps on the large vertical display to position an annotation on a bar chart, while the second one waits to perform a speech command to complete the annotation.","keywords":["Multimodal interaction, collaborative work, large vertical displays, elicitation study, spatio-temporal data"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://inria.hal.science/hal-04365019","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/w1Tud4nOruI&t=0h50m10s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233323150/v-tvcg-20233323150_Preview.mp4?token=oeG0IHkINWFB_kHZN1cFo4gGQIikZc5JzGhSffvQrqo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233323150/v-tvcg-20233323150_Preview.srt?token=-mODdCEMCAaG_-Yn7OVw0Pv66hYopF49mxbOk6GZYyE&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full24","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Collaboration and Communication","session_uid":"v-tvcg","session_youtube_ff_id":"3_88Dw8U6wo","session_youtube_ff_link":"https://youtu.be/3_88Dw8U6wo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/w1Tud4nOruI&t=0h50m10s","sessions":["Collaboration and Communication"],"time_stamp":"2024-10-16T16:48:00Z","title":"Eliciting Multimodal and Collaborative Interactions for Data Exploration on Large Vertical Displays","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1063","abstract":"While previous work has found success in deploying visualizations as museum exhibits, it has not investigated whether museum context impacts visitor behaviour with these exhibits. We present an interactive Deep-time Literacy Visualization Exhibit (DeLVE) to help museum visitors understand deep time (lengths of extremely long geological processes) by improving proportional reasoning skills through comparison of different time periods. DeLVE uses a new visualization idiom, Connected Multi-Tier Ranges, to visualize curated datasets of past events across multiple scales of time, relating extreme scales with concrete scales that have more familiar magnitudes and units. Museum staff at three separate museums approved the deployment of DeLVE as a digital kiosk, and devoted time to curating a unique dataset in each of them. We collect data from two sources, an observational study and system trace logs. We discuss the importance of context: similar museum exhibits in different contexts were received very differently by visitors. 
We additionally discuss differences in our process from Sedlmair et al.'s design study methodology, which is focused on design studies triggered by connection with collaborators rather than the discovery of a concept to communicate. Supplemental materials are available at: https://osf.io/z53dq/","accessible_pdf":true,"authors":[{"affiliations":["The University of British Columbia, Vancouver, Canada"],"email":"marasolen@gmail.com","is_corresponding":true,"name":"Mara Solen"},{"affiliations":["University of British Columbia, Vancouver, Canada"],"email":"sultananigar70@gmail.com","is_corresponding":false,"name":"Nigar Sultana"},{"affiliations":["University of British Columbia, Vancouver, Canada"],"email":"laura.lukes@ubc.ca","is_corresponding":false,"name":"Laura A. Lukes"},{"affiliations":["University of British Columbia, Vancouver, Canada"],"email":"tmm@cs.ubc.ca","is_corresponding":false,"name":"Tamara Munzner"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1063","image_caption":"The DeLVE visualization software, displaying the dataset of past events in biological and geological history, as deployed at a biology museum. The data is visualized across multiple scales using our novel Connected Multi-Tier Ranges idiom.","keywords":["Visualization, design study, museum, deep time."],"open_access_supplemental_link":"https://osf.io/z53dq/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2404.01488","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/I_s2xqsUD28&t=0h0m44s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1063/v-full-1063_Preview.mp4?token=imowDK921MoWpId_cyY984Z_zSeJnC13Qem8GS3ciF0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1063/v-full-1063_Preview.srt?token=qJtfNNpAEr4S1t3_WdJqLeMQXxxliYqfkZaDzb_89Gc&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full25","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Once Upon a Visualization","session_uid":"v-full","session_youtube_ff_id":"LoSRYmcllmY","session_youtube_ff_link":"https://youtu.be/LoSRYmcllmY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/I_s2xqsUD28&t=0h0m44s","sessions":["Once Upon a Visualization"],"time_stamp":"2024-10-17T16:00:00Z","title":"DeLVE into Earth\u2019s Past: A Visualization-Based Exhibit Deployed Across Multiple Museum Contexts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1333","abstract":"Data videos are increasingly becoming a popular data storytelling form represented by visual and audio integration. In recent years, more and more researchers have explored many narrative structures for effective and attractive data storytelling. Meanwhile, the Hero's Journey provides a classic narrative framework specific to the Hero's story that has been adopted by various mediums. There are continuous discussions about applying Hero's Journey to data stories. However, so far, there has been little systematic and practical guidance on how to create a data video for a specific story type like the Hero's Journey, as well as how to manipulate its sound and visual designs simultaneously. 
To fill this gap, we first identified 48 data videos aligned with the Hero's Journey as the common storytelling form from 109 high-quality data videos. Then, we examined how existing practices apply the Hero's Journey for creating data videos. We coded the 48 data videos in terms of the narrative stages, sound design, and visual design according to the Hero's Journey structure. Based on our findings, we proposed a design space to provide practical guidance on the narrative, visual, and sound custom design for different narrative segments of the hero's journey (i.e., Departure, Initiation, Return) through data video creation. To validate our proposed design space, we conducted a user study where 20 participants were invited to design data videos with and without our design space guidance, which was evaluated by two experts. Results show that our design space provides useful and practical guidance for data storytellers to effectively create data videos with the Hero's Journey.","accessible_pdf":false,"authors":[{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, Hong Kong"],"email":"zwei302@connect.hkust-gz.edu.cn","is_corresponding":true,"name":"Zheng Wei"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"huamin@cse.ust.hk","is_corresponding":false,"name":"Huamin Qu"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"xxubq@connect.ust.hk","is_corresponding":false,"name":"Xian Xu"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1333","image_caption":"Applying the Hero's Journey as a framework for creating data videos, we organize a design space into three segments (i.e., Departure, Initiation, Return), grounded in the narrative structure of the Hero's Journey. The Departure has six narrative stages, the Initiation has seven narrative stages, and the Return has four narrative stages. 
Each narrative stage is equipped with corresponding sound design and visual design.","keywords":["The Hero's Journey, Narrative Structure, Narrative Visualization, Data Visualization, Data Videos"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/I_s2xqsUD28&t=0h12m47s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1333/v-full-1333_Preview.mp4?token=fETBO1EkdBunsDoPRjp9sEM_y_dQMiu_zH8vQNcA_3E&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full25","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Once Upon a Visualization","session_uid":"v-full","session_youtube_ff_id":"IXwVnOl8OAo","session_youtube_ff_link":"https://youtu.be/IXwVnOl8OAo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/I_s2xqsUD28&t=0h12m47s","sessions":["Once Upon a Visualization"],"time_stamp":"2024-10-17T16:12:00Z","title":"Telling Data Stories with the Hero\u2019s Journey: Design Guidance for Creating Data Videos","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1425","abstract":"Comics are an effective method for sequential data-driven storytelling, especially for dynamic graphs\u2014graphs whose vertices and edges change over time. However, manually creating such comics is currently time-consuming, complex, and error-prone. In this paper, we propose DG Comics, a novel comic authoring tool for dynamic graphs that allows users to semi-automatically build and annotate comics. The tool uses a newly developed hierarchical clustering algorithm to segment consecutive snapshots of dynamic graphs while preserving their chronological order. It also presents rich information on both individuals and communities extracted from dynamic graphs in multiple views, where users can explore dynamic graphs and choose what to tell in comics. For evaluation, we provide an example and report the results of a user study and an expert review. ","accessible_pdf":false,"authors":[{"affiliations":["Ulsan National Institute of Science and Technology, Ulsan, Korea, Republic of"],"email":"joohee@unist.ac.kr","is_corresponding":true,"name":"Joohee Kim"},{"affiliations":["Ulsan National Institute of Science and Technology, Ulsan, Korea, Republic of"],"email":"gusdnr0916@unist.ac.kr","is_corresponding":false,"name":"Hyunwook Lee"},{"affiliations":["Ulsan National Institute of Science and Technology, Ulsan, Korea, Republic of"],"email":"ducnm@unist.ac.kr","is_corresponding":false,"name":"Duc M. 
Nguyen"},{"affiliations":["Australian National University, Canberra, Australia"],"email":"minjeong.shin@anu.edu.au","is_corresponding":false,"name":"Minjeong Shin"},{"affiliations":["IBM Research, Cambridge, United States"],"email":"bumchul.kwon@us.ibm.com","is_corresponding":false,"name":"Bum Chul Kwon"},{"affiliations":["UNIST, Ulsan, Korea, Republic of"],"email":"sako@unist.ac.kr","is_corresponding":false,"name":"Sungahn Ko"},{"affiliations":["Aarhus University, Aarhus, Denmark"],"email":"elm@cs.au.dk","is_corresponding":false,"name":"Niklas Elmqvist"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1425","image_caption":"DG Comics offers a Summary View that facilitates the automatic generation of comic templates, sliders for filtering and highlighting nodes, a Graph Comic View for editing the graph comic, and Main Character and Supporting Character tables for managing nodes. It also includes a Timeline View for exploring graph snapshots. Users can switch to the Node Attribute Table to select specific main characters or to the Community View to inspect the evolution of node relationships. The tool supports (M) mental map preservation by fixing nodes across displays and visualizes (O) community changes using bubble sets.","keywords":["Data-driven storytelling, narrative visualization, dynamic graphs, graph comics"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.04874","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/I_s2xqsUD28&t=0h59m56s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1425/v-full-1425_Preview.mp4?token=k2gdCk90u5wFDpzx5zb2D4M_tSdgJgLIUBwsRlEFYB0&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full25","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Once Upon a Visualization","session_uid":"v-full","session_youtube_ff_id":"qzU1QLDM4zs","session_youtube_ff_link":"https://youtu.be/qzU1QLDM4zs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/I_s2xqsUD28&t=0h59m56s","sessions":["Once Upon a Visualization"],"time_stamp":"2024-10-17T17:00:00Z","title":"DG Comics: Semi-Automatically Authoring Graph Comics for Dynamic Graphs","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243372104","abstract":"With the rise of short-form video platforms and the increasing availability of data, we see the potential for people to share short-form videos embedded with data in situ (e.g., daily steps when running) to increase the credibility and expressiveness of their stories. However, creating and sharing such videos in situ is challenging since it involves multiple steps and skills (e.g., data visualization creation and video editing), especially for amateurs. By conducting a formative study (N=10) using three design probes, we collected the motivations and design requirements. We then built VisTellAR, a mobile AR authoring tool, to help amateur video creators embed data visualizations in short-form videos in situ. A two-day user study shows that participants (N=12) successfully created various videos with data visualizations in situ and they confirmed the ease of use and learning. 
AR pre-stage authoring was useful to assist people in setting up data visualizations in reality with more designs in camera movements and interaction with gestures and physical objects to storytelling.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Wai Tong"},{"affiliations":"","email":"","is_corresponding":false,"name":"Kento Shigyo"},{"affiliations":"","email":"","is_corresponding":false,"name":"Lin-Ping Yuan"},{"affiliations":"","email":"","is_corresponding":false,"name":"Mingming Fan"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ting-Chuen Pong"},{"affiliations":"","email":"","is_corresponding":false,"name":"Huamin Qu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Meng Xia"}],"award":"","doi":"10.1109/TVCG.2024.3372104","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243372104","image_caption":"This figure illustrates an authoring process. (a-b) VisTellAR detects planes and objects for users to anchor visualizations in reality. Users can edit the data, mark, axis, and behavior. (c-d) During video-taking, users can voice over, perform hand gestures, and see a countdown that notifies them when the visualization will be shown. (e-f) After taking the video, a timeline is shown to indicate when visualizations take place in the video. Users can reconfigure visualizations if needed. ","keywords":["Personal data, augmented reality, data visualization, storytelling, short-form video"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://www.researchgate.net/publication/378657335_VisTellAR_Embedding_Data_Visualization_to_Short-form_Videos_Using_Mobile_Augmented_Reality","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/I_s2xqsUD28&t=0h23m29s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243372104/v-tvcg-20243372104_Preview.mp4?token=2PJhSCM9a-82r8KWVB_bvpn5XWzShv6y8d50FAY5m4o&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full25","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Once Upon a Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"EeX1q0ZhSII","session_youtube_ff_link":"https://youtu.be/EeX1q0ZhSII","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/I_s2xqsUD28&t=0h23m29s","sessions":["Once Upon a Visualization"],"time_stamp":"2024-10-17T16:24:00Z","title":"VisTellAR: Embedding Data Visualization to Short-form Videos Using Mobile Augmented Reality","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243397004","abstract":"Data charts are prevalent across various fields due to their efficacy in conveying complex data relationships. However, static charts may sometimes struggle to engage readers and efficiently present intricate information, potentially resulting in limited understanding. We introduce \u201cLive Charts,\u201d a new format of presentation that decomposes complex information within a chart and explains the information pieces sequentially through rich animations and accompanying audio narration. We propose an automated approach to revive static charts into Live Charts. 
Our method integrates GNN-based techniques to analyze the chart components and extract data from charts. Then we adopt large natural language models to generate appropriate animated visuals along with a voice-over to produce Live Charts from static ones. We conducted a thorough evaluation of our approach, which involved the model performance, use cases, a crowd-sourced user study, and expert interviews. The results demonstrate Live Charts offer a multi-sensory experience where readers can follow the information and understand the data insights better. We analyze the benefits and drawbacks of Live Charts over static charts as a new information consumption experience.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Lu Ying"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yun Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Haotian Li"},{"affiliations":"","email":"","is_corresponding":false,"name":"Shuguang Dou"},{"affiliations":"","email":"","is_corresponding":false,"name":"Haidong Zhang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Xinyang Jiang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Huamin Qu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yingcai Wu"}],"award":"","doi":"10.1109/TVCG.2024.3397004","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243397004","image_caption":"Two Live Charts are presented: (a1-a5) and (b1-b5). The image flow illustrates the keyframes of the LiveChart, with animations highlighted by dotted blue boxes. The following text provides the corresponding audio narration, with the first tag identifying the chart component or type of insight being described.","keywords":["Charts, storytelling, machine learning, automatic visualization"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/I_s2xqsUD28&t=0h47m57s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243397004/v-tvcg-20243397004_Preview.mp4?token=br95VJa9I28ipJKCRSa-9txfELwn1J1Gapsz5LvOlKo&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full25","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Once Upon a Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"_-K7ygteIfM","session_youtube_ff_link":"https://youtu.be/_-K7ygteIfM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/I_s2xqsUD28&t=0h47m57s","sessions":["Once Upon a Visualization"],"time_stamp":"2024-10-17T16:48:00Z","title":"Reviving Static Charts into Live Charts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243411575","abstract":"Creating an animated data video with audio narration is a time-consuming and complex task that requires expertise. It involves designing complex animations, turning written scripts into audio narrations, and synchronizing visual changes with the narrations. This paper presents WonderFlow, an interactive authoring tool, that facilitates narration-centric design of animated data videos. 
WonderFlow allows authors to easily specify semantic links between text and the corresponding chart elements. Then it automatically generates audio narration by leveraging text-to-speech techniques and aligns the narration with an animation. WonderFlow provides a structure-aware animation library designed to ease chart animation creation, enabling authors to apply pre-designed animation effects to common visualization components. Additionally, authors can preview and refine their data videos within the same system, without having to switch between different creation tools. A series of evaluation results confirmed that WonderFlow is easy to use and simplifies the creation of data videos with narration-animation interplay.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Yun Wang"},{"affiliations":"","email":"","is_corresponding":true,"name":"Leixian Shen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Zhengxin You"},{"affiliations":"","email":"","is_corresponding":false,"name":"Xinhuan Shu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Bongshin Lee"},{"affiliations":"","email":"","is_corresponding":false,"name":"John Thompson"},{"affiliations":"","email":"","is_corresponding":false,"name":"Haidong Zhang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Dongmei Zhang"}],"award":"","doi":"10.1109/TVCG.2024.3411575","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243411575","image_caption":"User interface of WonderFlow. Users can first select the text phrases in the narration editor (a) and visual elements from the canvas (b) to form text-visual links. Then they can apply an animation preset selected in the animation effect panel (c) to the visual elements. WonderFlow then generates a narration-animation pack on the timeline (d).","keywords":["Data video, Data visualization, Narration-animation interplay, Storytelling, Authoring tool"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/I_s2xqsUD28&t=0h35m40s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243411575/v-tvcg-20243411575_Preview.mp4?token=jHIqyZSMu4awRI-YQ30KvjX8iNUjp8d61CSq8x4Xn-w&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full25","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Once Upon a Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"qMT01pGEVfg","session_youtube_ff_link":"https://youtu.be/qMT01pGEVfg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/I_s2xqsUD28&t=0h35m40s","sessions":["Once Upon a Visualization"],"time_stamp":"2024-10-17T16:36:00Z","title":"WonderFlow: Narration-Centric Design of Animated Data Videos","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1288","abstract":"Data tables are one of the most common ways in which people encounter data. Although mostly built with text and numbers, data tables have a spatial layout and often exhibit visual elements meant to facilitate their reading. 
Surprisingly, there is an empirical knowledge gap on how people read tables and how different visual aids affect people's reading of tables. In this work, we seek to address this vacuum through a controlled study. We asked participants to repeatedly perform four different tasks with four table representation conditions (plain tables, tables with zebra striping, tables with cell background color encoding cell value, and tables with in-cell bars with lengths encoding cell value). We analyzed completion time, error rate, gaze-tracking data, mouse movement and participant preferences. We found that color and bar encodings help for finding maximum values. For a more complex task (comparison of proportional differences), color and bar helped less than zebra striping. We also characterize typical human behavior for the four tasks. These findings inform the design of tables and research directions for improving presentation of data in tabular form.","accessible_pdf":false,"authors":[{"affiliations":["University of Victoria, Victoria, Canada"],"email":"yongfengji@uvic.ca","is_corresponding":false,"name":"YongFeng Ji"},{"affiliations":["University of Victoria, Victoria, Canada"],"email":"cperin@uvic.ca","is_corresponding":true,"name":"Charles Perin"},{"affiliations":["University of Victoria, Victoria, Canada"],"email":"nacenta@gmail.com","is_corresponding":false,"name":"Miguel A Nacenta"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1288","image_caption":"We study the effects of one visual feature (zebra striping, top right) and two visual encodings (color shading, bottom left, and data bars, bottom right) on the readability of numeric data tables, compared to a plain table (top left).","keywords":["Data Table, Visual Encoding, Visual Aid, Gaze Analysis, Zebra, Data Bars, Tabular Representations."],"open_access_supplemental_link":"https://osf.io/jfg3h/?view_only=f064cff189c4440299a3c3b10ddab232","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://doi.org/10.31219/osf.io/2t3sc","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/gaAm2v-ENKA&t=0h13m20s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1288/v-full-1288_Preview.mp4?token=WrBIX_ELfQf5Wq5HyVgAdJ-Km4gCT2hW7QnwqnMe4og&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full26","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Perception and Cognition","session_uid":"v-full","session_youtube_ff_id":"U-KVskuEvz8","session_youtube_ff_link":"https://youtu.be/U-KVskuEvz8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/gaAm2v-ENKA&t=0h13m20s","sessions":["Perception and Cognition"],"time_stamp":"2024-10-16T16:12:00Z","title":"The Effect of Visual Aids on Reading Numeric Data Tables","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1291","abstract":"Emotion is an important factor to consider when designing visualizations as it can impact the amount of trust viewers place in a visualization, how well they can retrieve information and understand the underlying data, and how much they engage with or connect to a visualization. 
We conducted five crowdsourced experiments to quantify the effects of color, chart type, data trend, data variability and data density on emotion (measured through self-reported arousal and valence). Results from our experiments show that there are multiple design elements which influence the emotion induced by a visualization and, more surprisingly, that certain data characteristics influence the emotion of viewers even when the data has no meaning. In light of these findings, we offer guidelines on how to use color, scale, and chart type to counterbalance and emphasize the emotional impact of immutable data characteristics.","accessible_pdf":false,"authors":[{"affiliations":["University of Waterloo, Waterloo, Canada","University of Victoria, Victoria, Canada"],"email":"cartergblair@gmail.com","is_corresponding":false,"name":"Carter Blair"},{"affiliations":["University of Victoria, Victoira, Canada","Delft University of Technology, Delft, Netherlands"],"email":"xiyao.wang23@gmail.com","is_corresponding":false,"name":"Xiyao Wang"},{"affiliations":["University of Victoria, Victoria, Canada"],"email":"cperin@uvic.ca","is_corresponding":true,"name":"Charles Perin"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1291","image_caption":"We quantify through five studies the effects of color (Study 1 and Study 2), chart type (Study 3, Study 4, and Study 5), data trend (Study 2 and Study 3), data variance (Study 4), and data density (Study 5) on emotion (measured through arousal and valence ratings using the Self-Assessment Manikin scale).","keywords":["Affect, Data Visualization, Emotion, Quantitative Study"],"open_access_supplemental_link":"https://osf.io/ywjs4/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://doi.org/10.48550/arXiv.2407.18427","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/gaAm2v-ENKA&t=0h25m35s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1291/v-full-1291_Preview.mp4?token=libWQ7VS7pdmjYQyKZ3-lnWX54SUtumA-g-Tno70Egk&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full26","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Perception and Cognition","session_uid":"v-full","session_youtube_ff_id":"Hht8iAtJ40w","session_youtube_ff_link":"https://youtu.be/Hht8iAtJ40w","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/gaAm2v-ENKA&t=0h25m35s","sessions":["Perception and Cognition"],"time_stamp":"2024-10-16T16:24:00Z","title":"Quantifying Emotional Responses to Immutable Data Characteristics and Designer Choices in Data Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1480","abstract":"We propose the notion of attention-aware visualizations (AAVs) that track the user\u2019s perception of a visual representation over time and feed this information back to the visualization. Such context awareness is particularly useful for ubiquitous and immersive analytics where knowing which embedded visualizations the user is looking at can be used to make visualizations react appropriately to the user\u2019s attention: for example, by highlighting data the user has not yet seen. 
We can separate the approach into three components: (1) measuring the user\u2019s gaze on a visualization and its parts; (2) tracking the user\u2019s attention over time; and (3) reactively modifying the visual representation based on the current attention metric. In this paper, we present two separate implementations of AAV: a 2D data-agnostic method for web-based visualizations that can use an embodied eyetracker to capture the user\u2019s gaze, and a 3D data-aware one that uses the stencil buffer to track the visibility of each individual mark in a visualization. Both methods provide similar mechanisms for accumulating attention over time and changing the appearance of marks in response. We also present results from a qualitative evaluation studying visual feedback and triggering mechanisms for capturing and revisualizing attention.","accessible_pdf":false,"authors":[{"affiliations":["Aarhus University, Aarhus, Denmark"],"email":"arvind@cs.au.dk","is_corresponding":false,"name":"Arvind Srinivasan"},{"affiliations":["Aarhus University, Aarhus N, Denmark"],"email":"johannes@ellemose.eu","is_corresponding":false,"name":"Johannes Ellemose"},{"affiliations":["Bangor University, Bangor, United Kingdom"],"email":"p.butcher@bangor.ac.uk","is_corresponding":false,"name":"Peter W. S. Butcher"},{"affiliations":["Bangor University, Bangor, United Kingdom"],"email":"p.ritsos@bangor.ac.uk","is_corresponding":false,"name":"Panagiotis D. Ritsos"},{"affiliations":["Aarhus University, Aarhus, Denmark"],"email":"elm@cs.au.dk","is_corresponding":false,"name":"Niklas Elmqvist"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1480","image_caption":"This image illustrates various Attention-aware re-visualization techniques that adapt based on user attention in both 3D and 2D spaces. The left side of the image focuses on our \u201cData Aware 3D\u201d implementation applying GPU Color Picking, featuring heatmaps and desaturation techniques that respond to user orientation, rotation, and location within a 3D environment. The right side displays our \u201cData Agnostic 2D\u201d implementation applying a Picture Framing Metaphor, highlighting how user attention, tracked through gaze, pointer, and keyboard input, shapes different frames like bar, area, and heat maps. 
These revisualizations that adjust dynamically to emphasize areas of interest based on cumulative attention were then qualitatively evaluated across different triggering mechanisms.","keywords":["Attention tracking, eyetracking, immersive analytics, ubiquitous analytics, post-WIMP interaction"],"open_access_supplemental_link":"https://osf.io/8mfhp/","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2404.10732","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/gaAm2v-ENKA&t=1h2m25s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1480/v-full-1480_Preview.mp4?token=6hU4GabNxurQqMyFUGJ3MOpoqLwws2FyOL0qkKNd3Uk&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1480/v-full-1480_Preview.srt?token=YWRI43FlHCRbJh6Gvl67T1d1OK9jEOT80_8LNGHH1GY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full26","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Perception and Cognition","session_uid":"v-full","session_youtube_ff_id":"cDGkQpk85yw","session_youtube_ff_link":"https://youtu.be/cDGkQpk85yw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/gaAm2v-ENKA&t=1h2m25s","sessions":["Perception and Cognition"],"time_stamp":"2024-10-16T17:00:00Z","title":"Attention-Aware Visualization: Tracking and Responding to User Perception Over Time","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1638","abstract":"Probability density function (PDF) curves are among the few charts on a Cartesian coordinate system that are commonly presented without y-axes. This design decision may be due to the lack of relevance of vertical scaling in normal PDFs. In fact, as long as two normal PDFs have the same means and standard deviations (SDs), they can be scaled to occupy different amounts of vertical space while still remaining statistically identical. Because unfixed PDF height increases as SD decreases, visualization designers may find themselves tempted to vertically shrink low-SD PDFs to avoid occlusion or save white space in their figures. Although irregular vertical scaling has been explored in bar and line charts, the visualization community has yet to investigate how this visual manipulation may affect reader comparisons of PDFs. In this paper, we present two preregistered experiments (n = 600, n = 401) that systematically demonstrate that vertical scaling can lead to misinterpretations of PDFs. We also test visual interventions to mitigate misinterpretation. In some contexts, we find including a y-axis can help reduce this effect. Overall, we find that keeping vertical scaling consistent, and therefore maintaining equal pixel areas under PDF curves, results in the highest likelihood of accurate comparisons. Our findings provide insights into the impact of vertical scaling on PDFs, and reveal the complicated nature of proportional area comparisons.","accessible_pdf":true,"authors":[{"affiliations":["Northeastern University, Boston, United States"],"email":"racquel.fygenson@gmail.com","is_corresponding":true,"name":"Racquel Fygenson"},{"affiliations":["Northeastern University, Boston, United States"],"email":"l.padilla@northeastern.edu","is_corresponding":false,"name":"Lace M. 
Padilla"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1638","image_caption":"When showing multiple probability density function (PDF) plots, it can be compelling to shrink plots with small standard deviations that have tall peaks. This compression may save space and make figures look nicer, but could this compression impact reader comprehension? In this paper, we compare the impact of \"squishing\" PDF plots and find reader comparison of plots with different vertical scales is lower than that of plots with the same vertical scale. ","keywords":["visualization, probability density function, uncertainty, vertical scaling, perception, area chart"],"open_access_supplemental_link":"https://osf.io/7k5un/","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/w3dgq","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/gaAm2v-ENKA&t=0h0m20s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1638/v-full-1638_Preview.mp4?token=vDxDHiVPykap408m44TBUUDPrz6zAmggZ2fhZxGb1sI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1638/v-full-1638_Preview.srt?token=SrQKxfxzDSTFwU5Cx8rPsqLwzeyUJn5189JlI0wpB7I&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full26","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Perception and Cognition","session_uid":"v-full","session_youtube_ff_id":"nHx017A7OcI","session_youtube_ff_link":"https://youtu.be/nHx017A7OcI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/gaAm2v-ENKA&t=0h0m20s","sessions":["Perception and Cognition"],"time_stamp":"2024-10-16T16:00:00Z","title":"The Impact of Vertical Scaling on Normal Probability Density Function Plots","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233336588","abstract":"This article explores how the ability to recall information in data visualizations depends on the presentation technology. Participants viewed 10 Isotype visualizations on a 2D screen, in 3D, in Virtual Reality (VR) and in Mixed Reality (MR). To provide a fair comparison between the three 3D conditions, we used LIDAR to capture the details of the physical rooms, and used this information to create our textured 3D models. For all environments, we measured the number of visualizations recalled and their order (2D) or spatial location (3D, VR, MR). We also measured the number of syntactic and semantic features recalled. Results of our study show increased recall and greater richness of data understanding in the MR condition. Not only did participants recall more visualizations and ordinal/spatial positions in MR, but they also remembered more details about graph axes and data mappings, and more information about the shape of the data. 
We discuss how differences in the spatial and kinesthetic cues provided in these different environments could contribute to these results, and reasons why we did not observe comparable performance in the 3D and VR conditions.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Christophe Hurter"},{"affiliations":"","email":"","is_corresponding":false,"name":"Bernice Rogowitz"},{"affiliations":"","email":"","is_corresponding":false,"name":"Guillaume Truong"},{"affiliations":"","email":"","is_corresponding":false,"name":"Tiffany Andry"},{"affiliations":"","email":"","is_corresponding":false,"name":"Hugo Romat"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ludovic Gardy"},{"affiliations":"","email":"","is_corresponding":false,"name":"Fereshteh Amini"},{"affiliations":"","email":"","is_corresponding":false,"name":"Nathalie Henry Riche"}],"award":"","doi":"10.1109/TVCG.2023.3336588","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233336588","image_caption":"In this study, inspired by the memory palace technique, we explore how different presentation technologies impact the recall of data, specifically using Isotypes. ","keywords":["Data visualization, Three-dimensional displays, Virtual reality, Mixed reality, Electronic mail, Syntactics, Semantics"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/gaAm2v-ENKA&t=0h50m0s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233336588/v-tvcg-20233336588_Preview.mp4?token=NXF49TRW3Iw2249Btsx68oteEXEikPfeZHc82cpwHnA&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full26","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Perception and Cognition","session_uid":"v-tvcg","session_youtube_ff_id":"grzXRIstvMk","session_youtube_ff_link":"https://youtu.be/grzXRIstvMk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/gaAm2v-ENKA&t=0h50m0s","sessions":["Perception and Cognition"],"time_stamp":"2024-10-16T16:48:00Z","title":"Memory Recall for Data Visualizations in Mixed Reality, Virtual Reality, 3D, and 2D","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243372620","abstract":"Small multiples are a popular visualization method, displaying different views of a dataset using multiple frames, often with the same scale and axes. However, there is a need to address their potential constraints, especially in the context of human cognitive capacity limits. These limits dictate the maximum information our mind can process at once. We explore the issue of capacity limitation by testing competing theories that describe how the number of frames shown in a display, the scale of the frames, and time constraints impact user performance with small multiples of line charts in an energy grid scenario. In two online studies (Experiment 1 n = 141 and Experiment 2 n = 360) and a follow-up eye-tracking analysis (n=5),we found a linear decline in accuracy with increasing frames across seven tasks, which was not fully explained by differences in frame size, suggesting visual search challenges. 
Moreover, the studies demonstrate that highlighting specific frames can mitigate some visual search difficulties but, surprisingly, not eliminate them. This research offers insights into optimizing the utility of small multiples by aligning them with human limitations.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Helia Hosseinpour"},{"affiliations":"","email":"","is_corresponding":false,"name":"Laura E. Matzen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Kristin M. Divis"},{"affiliations":"","email":"","is_corresponding":false,"name":"Spencer C. Castro"},{"affiliations":"","email":"","is_corresponding":false,"name":"Lace Padilla"}],"award":"","doi":"10.1109/TVCG.2024.3372620","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243372620","image_caption":"Fig. 1: Example of the small multiple stimuli used in Experiment 1 that varied in frame quantity from 2 to 70, incremented by four frames. The stimuli depicted power (in megawatts) over time (one year per frame).","keywords":["Cognition, small multiples, time-series data"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/psyarxiv/a6k8z","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/gaAm2v-ENKA&t=0h37m35s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243372620/v-tvcg-20243372620_Preview.mp4?token=wJKQdng-SaTIZarhp7tlUrcbG5MCiUwtGwuxwhuMdJo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243372620/v-tvcg-20243372620_Preview.srt?token=SpxX29O_iIJjG_XPF7jCMxjFQ8v7Th7nCw-bF1NFT4o&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full26","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Perception and Cognition","session_uid":"v-tvcg","session_youtube_ff_id":"i_ZRWKrK2fs","session_youtube_ff_link":"https://youtu.be/i_ZRWKrK2fs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/gaAm2v-ENKA&t=0h37m35s","sessions":["Perception and Cognition"],"time_stamp":"2024-10-16T16:36:00Z","title":"Examining Limits of Small Multiples: Frame Quantity Impacts Judgments with Line Graphs","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1214","abstract":"Graphs are often used to model relationships between entities. The identification and visualization of clusters in graphs enable insight discovery in many application areas, such as life sciences and social sciences. Force-directed graph layouts promote the visual saliency of clusters, as they bring adjacent nodes closer together, and push non-adjacent nodes apart. At the same time, matrices can effectively show clusters when a suitable row/column ordering is applied, but are less appealing to untrained users not providing an intuitive node-link metaphor. It is thus worth exploring layouts combining the strengths of the node-link metaphor and node ordering. In this work, we study the impact of node ordering on the visual saliency of clusters in orderable node-link diagrams, namely radial diagrams, arc diagrams and symmetric arc diagrams. 
Through a crowdsourced controlled experiment, we show that users can count clusters consistently more accurately, and to a large extent faster, with orderable node-link diagrams than with three state-of-the art force-directed layout algorithms, i.e., `Linlog', `Backbone' and `sfdp'. The measured advantage is greater in case of low cluster separability and/or low compactness. A free copy of this paper and all supplemental materials are available at https://osf.io/kc3dg/.","accessible_pdf":false,"authors":[{"affiliations":["Luxembourg Institute of Science and Technology, Esch-sur-Alzette, Luxembourg"],"email":"nora.alnaami@list.lu","is_corresponding":false,"name":"Nora Al-Naami"},{"affiliations":["Luxembourg Institute of Science and Technology, Belvaux, Luxembourg"],"email":"nicolas.medoc@list.lu","is_corresponding":false,"name":"Nicolas Medoc"},{"affiliations":["Uppsala University, Uppsala, Sweden"],"email":"matteo.magnani@it.uu.se","is_corresponding":false,"name":"Matteo Magnani"},{"affiliations":["Luxembourg Institute of Science and Technology, Belvaux, Luxembourg"],"email":"mohammad.ghoniem@list.lu","is_corresponding":true,"name":"Mohammad Ghoniem"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1214","image_caption":"A symmetric arc diagram representing a 51-node graph extracted from the co-occurrence network of characters of \"Les Mis\u00e9rables\", the novel of Victor Hugo. The nodes are ordered according to the crossing reduction algorithm. ","keywords":["network visualization, arc diagrams, radial diagrams, cluster perception, graph seriation"],"open_access_supplemental_link":"https://osf.io/kc3dg/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://hal.science/hal-04668352","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/cJNBh2zSTiU&t=0h0m34s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1214/v-full-1214_Preview.mp4?token=Eq1d9_8bf1Cs8leruLVlhzqxc5uiAWsof3tZ5n9GSsE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1214/v-full-1214_Preview.srt?token=vw2UqBRc1sxfQeUX2e6GTIEpHwT9U6tNFr7atJhP8hY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full27","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Of Nodes and Networks","session_uid":"v-full","session_youtube_ff_id":"8QT8_S2C0fs","session_youtube_ff_link":"https://youtu.be/8QT8_S2C0fs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/cJNBh2zSTiU&t=0h0m34s","sessions":["Of Nodes and Networks"],"time_stamp":"2024-10-16T17:45:00Z","title":"Improved Visual Saliency of Graph Clusters with Orderable Node-Link Layouts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1483","abstract":"Egocentric networks, often visualized as node-link diagrams, portray the complex relationship (link) dynamics between an entity (node) and others. However, common analytics tasks are multifaceted, encompassing interactions among four key aspects: strength, function, structure, and content. Current node-link visualization designs may fall short, focusing narrowly on certain aspects and neglecting the holistic, dynamic nature of egocentric networks. 
To bridge this gap, we introduce SpreadLine, a novel visualization framework designed to enable the visual exploration of egocentric networks from these four aspects at the microscopic level. Leveraging the intuitive appeal of storyline visualizations, SpreadLine adopts a storyline-based design to represent entities and their evolving relationships. We further encode essential topological information in the layout and condense the contextual information in a metro map metaphor, allowing for a more engaging and effective way to explore temporal and attribute-based information. To guide our work, we conducted a thorough review of pertinent literature and distilled a task taxonomy that addresses the analytical needs specific to egocentric network exploration. Acknowledging the diverse analytical requirements of users, SpreadLine offers customizable encodings to enable users to tailor the framework for their tasks. We demonstrate the efficacy and general applicability of SpreadLine through three diverse real-world case studies (disease surveillance, social media trends, and academic career evolution) and a usability study. ","accessible_pdf":true,"authors":[{"affiliations":["University of California, Davis, Davis, United States"],"email":"yskuo@ucdavis.edu","is_corresponding":true,"name":"Yun-Hsin Kuo"},{"affiliations":["University of California at Davis, Davis, United States"],"email":"dyuliu@ucdavis.edu","is_corresponding":false,"name":"Dongyu Liu"},{"affiliations":["University of California at Davis, Davis, United States"],"email":"ma@cs.ucdavis.edu","is_corresponding":false,"name":"Kwan-Liu Ma"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1483","image_caption":"SpreadLine is a visualization framework for exploring dynamic egocentric networks. It builds upon storyline visualizations to represent four network aspects: structure, strength, function, and content. Guided by a literature review, SpreadLine addresses essential analysis tasks and offers customizable encodings to meet diverse user needs. 
This figure presents an example of SpreadLine showing public reaction to a significant event.","keywords":["egocentric network, network analysis, design study, storyline visualization, visual exploration, metaphor"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/cJNBh2zSTiU&t=0h26m40s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1483/v-full-1483_Preview.mp4?token=ytXWxPflKOx5y9augCJSEbmxJEVDgPNM0UIRemLg5Lo&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full27","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Of Nodes and Networks","session_uid":"v-full","session_youtube_ff_id":"N4HpqmtLsDc","session_youtube_ff_link":"https://youtu.be/N4HpqmtLsDc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/cJNBh2zSTiU&t=0h26m40s","sessions":["Of Nodes and Networks"],"time_stamp":"2024-10-16T18:09:00Z","title":"SpreadLine: Visualizing Egocentric Dynamic Influence","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1809","abstract":"Visualizing relational data is crucial for understanding complex connections between entities in social networks, political affiliations, or biological interactions. Well-known representations like node-link diagrams and adjacency matrices offer valuable insights, but their effectiveness relies on the ability to identify patterns in the underlying topological structure. Reordering strategies and layout algorithms play a vital role in the visualization process since the arrangement of nodes, edges, or cells influences the visibility of these patterns. The BioFabric visualization combines elements of node-link diagrams and adjacency matrices, leveraging the strengths of both: the visual clarity of node-link diagrams and the tabular organization of adjacency matrices. A unique characteristic of BioFabric is the possibility to reorder nodes and edges separately. This raises the question of which combination of layout algorithms best reveals certain patterns. In this paper, we discuss patterns and anti-patterns in BioFabric, such as staircases or escalators, relate them to already established patterns, and propose metrics to evaluate their quality. Based on these quality metrics, we compared combinations of well-established reordering techniques applied to BioFabric with a well-known benchmark data set. Our experiments indicate that the edge order has a stronger influence on revealing patterns than the node layout. The results show that the best combination for revealing staircases is a barycentric node layout, together with an edge order based on node indices and length. Our research contributes a first building block for many promising future research directions, which we also share and discuss. 
A free copy of this paper and all supplemental materials are available at https://osf.io/9mt8r/?view_only=b70dfbe550e3404f83059afdc60184c6","accessible_pdf":false,"authors":[{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"fuchs@dbvis.inf.uni-konstanz.de","is_corresponding":true,"name":"Johannes Fuchs"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"alexander.frings@uni-konstanz.de","is_corresponding":false,"name":"Alexander Frings"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"maria-viktoria.heinle@uni-konstanz.de","is_corresponding":false,"name":"Maria-Viktoria Heinle"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"keim@uni-konstanz.de","is_corresponding":false,"name":"Daniel Keim"},{"affiliations":["University of Konstanz, Konstanz, Germany","TU Wien, Vienna, Austria"],"email":"sara.di-bartolomeo@uni-konstanz.de","is_corresponding":false,"name":"Sara Di Bartolomeo"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1809","image_caption":"The same synthetic data is visualized with BioFabric. The edge order has a huge influence on the appearance of patterns. A random edge order shows no topological structure, whereas our degreecending technique reveals three staircases and one path.","keywords":["Network Visualization, Graph Drawing, Graph Layout Algorithms, BioFabric, Graph Motif"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/cJNBh2zSTiU&t=0h12m40s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1809/v-full-1809_Preview.mp4?token=sYnN8_S710yTFOr4sy8fpyeSUYtQ5sxjnLXbRBwWYls&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1809/v-full-1809_Preview.srt?token=bMkBvuiBg59LNAB_X3tZa5xdzkXahJe4Z42ye7nZqwA&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full27","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Of Nodes and Networks","session_uid":"v-full","session_youtube_ff_id":"z5Loo1vtnXg","session_youtube_ff_link":"https://youtu.be/z5Loo1vtnXg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/cJNBh2zSTiU&t=0h12m40s","sessions":["Of Nodes and Networks"],"time_stamp":"2024-10-16T17:57:00Z","title":"Quality Metrics and Reordering Strategies for Revealing Patterns in BioFabric Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1874","abstract":"A layered graph is an important category of graph in which every node is assigned to a layer, and layers are drawn as parallel or radial lines. They are commonly used to display temporal data or hierarchical graphs. Previous research has demonstrated that minimizing edge crossings is the most important criterion to consider when looking to improve the readability of such graphs. While heuristic approaches exist for crossing minimization, we are interested in optimal approaches to the problem that prioritize human readability over computational scalability. We aim to improve the usefulness and applicability of such optimal methods by understanding and improving their scalability to larger graphs. 
This paper categorizes and evaluates the state-of-the-art linear programming formulations for exact crossing minimization and describes nine new and existing techniques that could plausibly accelerate the optimization algorithm. Through a computational evaluation, we explore each technique's effect on calculation time and how the techniques assist or inhibit one another, allowing researchers and practitioners to adapt them to the characteristics of their graphs. Our best-performing techniques yielded a median improvement of 2.5\u201317x depending on the solver used, giving us the capability to create optimal layouts faster and for larger graphs. We provide an open-source implementation of our methodology in Python, where users can pick which combination of techniques to enable according to their use case. A free copy of this paper and all supplemental materials, datasets used, and source code are available at https://osf.io/5vq79.","accessible_pdf":true,"authors":[{"affiliations":["Northeastern University, Boston, United States"],"email":"wilson.conn@northeastern.edu","is_corresponding":true,"name":"Connor Wilson"},{"affiliations":["Northeastern University, Boston, United States"],"email":"eduardopuertac@gmail.com","is_corresponding":false,"name":"Eduardo Puerta"},{"affiliations":["northeastern university, Boston, United States"],"email":"turokhunter@gmail.com","is_corresponding":false,"name":"Tarik Crnovrsanin"},{"affiliations":["University of Konstanz, Konstanz, Germany","Northeastern University, Boston, United States"],"email":"sara.di-bartolomeo@uni-konstanz.de","is_corresponding":false,"name":"Sara Di Bartolomeo"},{"affiliations":["Northeastern University, Boston, United States"],"email":"c.dunne@northeastern.edu","is_corresponding":false,"name":"Cody Dunne"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1874","image_caption":"In this work, we characterize nine techniques to improve the performance of an integer linear programming (ILP) formulation and empirically test their improvement. We call these switches since they can be toggled and combined. Here, the behavior of one of the switches, symmetry breaking, is illustrated. This technique removes redundancy in the model by fixing one of the decision variables. 
We find that use of the switch almost invariably improves the speed of the optimization solver.","keywords":["Integer linear programming, layered graph drawing, layered network visualization, crossing minimization, edge crossings"],"open_access_supplemental_link":"https://osf.io/5vq79","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/cJNBh2zSTiU&t=1h2m40s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1874/v-full-1874_Preview.mp4?token=G85Efq1rI0eSTL1MK_drwz50PwtWDcSI-zBmJYnTLUE&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full27","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Of Nodes and Networks","session_uid":"v-full","session_youtube_ff_id":"wIQnahaRsKk","session_youtube_ff_link":"https://youtu.be/wIQnahaRsKk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/cJNBh2zSTiU&t=1h2m40s","sessions":["Of Nodes and Networks"],"time_stamp":"2024-10-16T18:45:00Z","title":"Evaluating and extending speedup techniques for optimal crossing minimization in layered graph drawings","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233310019","abstract":"The dynamic network visualization design space consists of two major dimensions: network structural and temporal representation. As more techniques are developed and published, a clear need for evaluation and experimental comparisons between them emerges. Most studies explore the temporal dimension and diverse interaction techniques supporting the participants, focusing on a single structural representation. Empirical evidence about performance and preference for different visualization approaches is scattered over different studies, experimental settings, and tasks. This paper aims to comprehensively investigate the dynamic network visualization design space in two evaluations. First, a controlled study assessing participants' response times, accuracy, and preferences for different combinations of network structural and temporal representations on typical dynamic network exploration tasks, with and without the support of standard interaction methods. Second, the best-performing combinations from the first study are enhanced based on participants' feedback and evaluated in a heuristic-based qualitative study with visualization experts on a real-world network. Our results highlight node-link with animation and playback controls as the best-performing combination and the most preferred based on ratings. Matrices achieve similar performance to node-link in the first study but have considerably lower scores in our second evaluation. 
Similarly, juxtaposition exhibits evident scalability issues in more realistic analysis contexts.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Velitchko Filipov"},{"affiliations":"","email":"","is_corresponding":false,"name":"Alessio Arleo"},{"affiliations":"","email":"","is_corresponding":false,"name":"Markus B\u00f6gl"},{"affiliations":"","email":"","is_corresponding":false,"name":"Silvia Miksch"}],"award":"","doi":"10.1109/TVCG.2023.3310019","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233310019","image_caption":"This study evaluates the effectiveness of various network structural and temporal encodings in dynamic network visualization, focusing on Node-Link diagrams and Adjacency Matrices. Through two comprehensive studies, we assessed the accuracy, response times, and user preferences for different visualization techniques, including Juxtaposition, Superimposition, Auto-Animation, and Animation with Playback Controls. Our findings highlight the strengths and limitations of each approach, providing critical insights for optimizing dynamic network analysis and designing with tasks in mind. The figure illustrates key methods: Network structural and temporal encodings\u2014Juxtaposition (A,D), Superimposition (B,E), and Animation with Playback Controls (C,F).","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/cJNBh2zSTiU&t=0h38m15s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233310019/v-tvcg-20233310019_Preview.mp4?token=02r77JEMELRLQ63amQMGJlcBzC5lAQwyWngc-6ZJD6Y&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233310019/v-tvcg-20233310019_Preview.srt?token=pFAzbhYPtALNNUdKtVzOnFtRyLq7V2pk4xP5fjJgUCc&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full27","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Of Nodes and Networks","session_uid":"v-tvcg","session_youtube_ff_id":"kvHH763cMkU","session_youtube_ff_link":"https://youtu.be/kvHH763cMkU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/cJNBh2zSTiU&t=0h38m15s","sessions":["Of Nodes and Networks"],"time_stamp":"2024-10-16T18:21:00Z","title":"On Network Structural and Temporal Encodings: A Space and Time Odyssey","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233337396","abstract":"Partitioning a dynamic network into subsets (i.e., snapshots) based on disjoint time intervals is a widely used technique for understanding how structural patterns of the network evolve. However, selecting an appropriate time window (i.e., slicing a dynamic network into snapshots) is challenging and time-consuming, often involving a trial-and-error approach to investigating underlying structural patterns. To address this challenge, we present MoNetExplorer, a novel interactive visual analytics system that leverages temporal network motifs to provide recommendations for window sizes and support users in visually comparing different slicing results. 
MoNetExplorer provides a comprehensive analysis based on window size, including (1) a temporal overview to identify the structural information, (2) temporal network motif composition, and (3) node-link-diagram-based details to enable users to identify and understand structural patterns at various temporal resolutions. To demonstrate the effectiveness of our system, we conducted a case study with network researchers using two real-world dynamic network datasets. Our case studies show that the system effectively supports users to gain valuable insights into the temporal and structural aspects of dynamic networks.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Seokweon Jung"},{"affiliations":"","email":"","is_corresponding":false,"name":"DongHwa Shin"},{"affiliations":"","email":"","is_corresponding":false,"name":"Hyeon Jeon"},{"affiliations":"","email":"","is_corresponding":false,"name":"Kiroong Choe"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jinwook Seo"}],"award":"","doi":"10.1109/TVCG.2023.3337396","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233337396","image_caption":"MoNetExplorer is a visual analytics system designed to support the selection of appropriate window sizes for dynamic network analysis and provides a temporal and structural analysis of snapshots that are sliced according to window sizes. The system is composed of five linked components. (A) Slicing Navigation View supports the beginning of the workflow: selection of snapshot window sizes according to measures based on Temporal Network Motifs (TNM). (B) Temporal Measure View and (C) Temporal Status View enable validation of the quality of snapshots and identification of temporal patterns. (D) Motif Composition View visualizes the composition of temporal network motifs. 
(E) Bottom-level details of network structure are shown in Network View.","keywords":["Visual analytics, Measurement, Size measurement, Windows, Time measurement, Data visualization, Task analysis, Visual analytics, Dynamic networks, Temporal network motifs, Interactive network slicing"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/cJNBh2zSTiU&t=0h50m45s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233337396/v-tvcg-20233337396_Preview.mp4?token=0KbM6aJwuxs0G0Rpw-vPJ-2xQQAfINseTMtQMJ2zaJg&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233337396/v-tvcg-20233337396_Preview.srt?token=TOU6HuziX7iYDRKMcEVe6c9fvmCcyt7m6ExjtTavEJs&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full27","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Of Nodes and Networks","session_uid":"v-tvcg","session_youtube_ff_id":"8ShT_DsTgyQ","session_youtube_ff_link":"https://youtu.be/8ShT_DsTgyQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/cJNBh2zSTiU&t=0h50m45s","sessions":["Of Nodes and Networks"],"time_stamp":"2024-10-16T18:33:00Z","title":"MoNetExplorer: A Visual Analytics System for Analyzing Dynamic Networks with Temporal Network Motifs","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1147","abstract":"Large Language Models (LLMs) like GPT-4 which support multimodal input (i.e., prompts containing images in addition to text) have immense potential to advance visualization research. However, many questions exist about the visual capabilities of such models, including how well they can read and interpret visually represented data. In our work, we address this question by evaluating the GPT-4 multimodal LLM using a suite of task sets meant to assess the model's visualization literacy. The task sets are based on existing work in the visualization community addressing both automated chart question answering and human visualization literacy across multiple settings. Our assessment finds that GPT-4 can perform tasks such as recognizing trends and extreme values, and also demonstrates some understanding of visualization design best-practices. By contrast, GPT-4 struggles with simple value retrieval when not provided with the original dataset, lacks the ability to reliably distinguish between colors in charts, and occasionally suffers from hallucination and inconsistency. We conclude by reflecting on the model's strengths and weaknesses as well as the potential utility of models like GPT-4 for future visualization research. 
We also release all code, stimuli, and results for the task sets at the following link: https://doi.org/10.17605/OSF.IO/F39J6","accessible_pdf":true,"authors":[{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"abendeck3@gatech.edu","is_corresponding":true,"name":"Alexander Bendeck"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"john.stasko@cc.gatech.edu","is_corresponding":false,"name":"John Stasko"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1147","image_caption":"Large vision-language models like GPT-4V are extremely powerful, but we have little understanding of their visualization literacy capabilities. We conduct an empirical evaluation of the GPT-4V model on four tasks from the visualization literature related to visualization literacy: (1) the Visualization Literacy Assessment Test (VLAT); (2) a chart question answering dataset; (3) a set of questions about deceptive visualization design choices; and (4) a set of questions about visualizations with misaligned titles. We also release all materials and code to support future research.","keywords":["Visualization Literacy, Large Language Models, Natural Language"],"open_access_supplemental_link":"https://doi.org/10.17605/OSF.IO/F39J6","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/W3Vrrxo2w74&t=0h48m23s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1147/v-full-1147_Preview.mp4?token=RSeiBOIa3FraoklgtgzwYzc1xP-0gRTZoGxLbeHvDUg&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1147/v-full-1147_Preview.srt?token=lZ1QakQa_RMk3QXJ6EXcMjlynz8iPpX8pLJ9QRINsfw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full28","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Human and Machine Visualization Literacy","session_uid":"v-full","session_youtube_ff_id":"Nr30W716yjI","session_youtube_ff_link":"https://youtu.be/Nr30W716yjI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/W3Vrrxo2w74&t=0h48m23s","sessions":["Human and Machine Visualization Literacy"],"time_stamp":"2024-10-18T13:18:00Z","title":"An Empirical Evaluation of the GPT-4 Multimodal Language Model on Visualization Literacy Tasks","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1275","abstract":"We developed and validated an instrument to measure the perceived readability in data visualization: PREVis. Researchers and practitioners can easily use this instrument as part of their evaluations to compare the perceived readability of different visual data representations. Our instrument can complement results from controlled experiments on user task performance or provide additional data during in-depth qualitative work such as design iterations when developing a new technique. Although readability is recognized as an essential quality of data visualizations, so far there has not been a unified definition of the construct in the context of visual representations. 
As a result, researchers often lack guidance for determining how to ask people to rate their perceived readability of a visualization. To address this issue, we engaged in a rigorous process to develop the first validated instrument targeted at the subjective readability of visual data representations. Our final instrument consists of 11 items across 4 dimensions: understandability, layout clarity, readability of data values, and readability of data patterns. We provide the questionnaire as a document with implementation guidelines on osf.io/9cg8j. Beyond this instrument, we contribute a discussion of how researchers have previously assessed visualization readability, and an analysis of the factors underlying perceived readability in visual data representations.","accessible_pdf":false,"authors":[{"affiliations":["LISN, Universit\u00e9 Paris Saclay, CNRS, Orsay, France","Aviz, Inria, Saclay, France"],"email":"acabouat@gmail.com","is_corresponding":true,"name":"Anne-Flore Cabouat"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"tingying.he@inria.fr","is_corresponding":false,"name":"Tingying He"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"petra.isenberg@inria.fr","is_corresponding":false,"name":"Petra Isenberg"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"tobias.isenberg@gmail.com","is_corresponding":false,"name":"Tobias Isenberg"}],"award":"honorable","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1275","image_caption":"PREVis is a reliable instrument that allows respondents to rate how readable they find a static data visualization across 4 dimensions: layout clarity, ease of understanding, ease of reading data features, and ease of reading data values. 
","keywords":["Visualization, readability, validated instrument, perception, user experiments, empirical methods, methodology"],"open_access_supplemental_link":"https://osf.io/9cg8j","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.14908","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/W3Vrrxo2w74&t=0h25m5s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1275/v-full-1275_Preview.mp4?token=s-3k7ce13wMP2CJ8tVZi79RhIyDe-RpGRBaPn6usyD4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1275/v-full-1275_Preview.srt?token=jehYCBF9wLwHrXRT8HS61Er9DvdhUWKlYcYKNXGKLzE&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full28","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Human and Machine Visualization Literacy","session_uid":"v-full","session_youtube_ff_id":"SmrTAspA0PM","session_youtube_ff_link":"https://youtu.be/SmrTAspA0PM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/W3Vrrxo2w74&t=0h25m5s","sessions":["Human and Machine Visualization Literacy"],"time_stamp":"2024-10-18T12:54:00Z","title":"PREVis: Perceived Readability Evaluation for Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1318","abstract":"In this study, we address the growing issue of misleading charts, a prevalent problem that undermines the integrity of information dissemination. Misleading charts can distort the viewer\u2019s perception of data, leading to misinterpretations and decisions based on false information. The development of effective automatic detection methods for misleading charts is an urgent field of research. The recent advancement of multimodal Large Language Models (LLMs) has introduced a promising direction for addressing this challenge. We explored the capabilities of these models in analyzing complex charts and assessing the impact of different prompting strategies on the models\u2019 analyses. We utilized a dataset of misleading charts collected from the internet by prior research and crafted nine distinct prompts, ranging from simple to complex, to test the ability of four different multimodal LLMs in detecting over 21 different chart issues. Through three experiments\u2013from initial exploration to detailed analysis\u2013we progressively gained insights into how to effectively prompt LLMs to identify misleading charts and developed strategies to address the scalability challenges encountered as we expanded our detection range from the initial five issues to 21 issues in the final experiment. Our findings reveal that multimodal LLMs possess a strong capability for chart comprehension and critical thinking in data interpretation. There is significant potential in employing multimodal LLMs to counter misleading information by supporting critical thinking and enhancing visualization literacy. 
This study demonstrates the applicability of LLMs in addressing the pressing concern of misleading charts.","accessible_pdf":false,"authors":[{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"yhload@cse.ust.hk","is_corresponding":true,"name":"Leo Yu-Ho Lo"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"huamin@cse.ust.hk","is_corresponding":false,"name":"Huamin Qu"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1318","image_caption":"The paper title is \"How Good (Or Bad) Are LLMs at Detecting Misleading Visualizations?\" On the left hand side, the LLM response correctly identified the chart as misleading and gave a relevant reason. On the right hand side, the LLM response was incorrect and gave a wrong interpretation. ","keywords":["Deceptive Visualization, Large Language Models, Prompt Engineering"],"open_access_supplemental_link":"https://osf.io/vx526","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.17291","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/W3Vrrxo2w74&t=1h0m23s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1318/v-full-1318_Preview.mp4?token=envXKktkNZwTv1_vrEuKFaxtBG3OCJrz4N0Q-JdsXgo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1318/v-full-1318_Preview.srt?token=XlCfncPmSXCwi9SwrFtEr1wrliOw30bEUuUCihgYlSg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full28","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Human and Machine Visualization Literacy","session_uid":"v-full","session_youtube_ff_id":"LYcwSpyRxR8","session_youtube_ff_link":"https://youtu.be/LYcwSpyRxR8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/W3Vrrxo2w74&t=1h0m23s","sessions":["Human and Machine Visualization Literacy"],"time_stamp":"2024-10-18T13:30:00Z","title":"How Good (Or Bad) Are LLMs in Detecting Misleading Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1422","abstract":"Visualization items\u2014factual questions about visualizations that ask viewers to accomplish visualization tasks\u2014are regularly used in the field of information visualization as educational and evaluative materials. For example, researchers of visualization literacy require large, diverse banks of items to conduct studies where the same skill is measured repeatedly on the same participants. Yet, generating a large number of high-quality, diverse items requires significant time and expertise. To address the critical need for a large number of diverse visualization items in education and research, this paper investigates the potential for large language models (LLMs) to automate the generation of multiple-choice visualization items. Through an iterative design process, we develop the VILA (Visualization Items Generated by Large LAnguage Models) pipeline, for efficiently generating visualization items that measure people\u2019s ability to accomplish visualization tasks. We use the VILA pipeline to generate 1,404 candidate items across 12 chart types and 13 visualization tasks. 
In collaboration with 11 visualization experts, we develop an evaluation rulebook which we then use to rate the quality of all candidate items. The result is the VILA bank of \u223c1,100 items. From this evaluation, we also identify and classify current limitations of the VILA pipeline, and discuss the role of human oversight in ensuring quality. In addition, we demonstrate an application of our work by creating a visualization literacy test, VILA-VLAT, which measures people\u2019s ability to complete a diverse set of tasks on various types of visualizations; comparing it to the existing VLAT, VILA-VLAT shows moderate to high convergent validity (R = 0.70). Lastly, we discuss the application areas of the VILA pipeline and the VILA bank and provide practical recommendations for their use. All supplemental materials are available at https://osf.io/ysrhq/.","accessible_pdf":false,"authors":[{"affiliations":["Northwestern University, Evanston, United States"],"email":"yuancui2025@u.northwestern.edu","is_corresponding":true,"name":"Yuan Cui"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"wanqian.ge@northwestern.edu","is_corresponding":false,"name":"Lily W. Ge"},{"affiliations":["Worcester Polytechnic Institute, Worcester, United States"],"email":"yding5@wpi.edu","is_corresponding":false,"name":"Yiren Ding"},{"affiliations":["Worcester Polytechnic Institute, Worcester, United States"],"email":"ltharrison@wpi.edu","is_corresponding":false,"name":"Lane Harrison"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"fumeng.p.yang@gmail.com","is_corresponding":false,"name":"Fumeng Yang"},{"affiliations":["Northwestern University, Chicago, United States"],"email":"matthew.kay@gmail.com","is_corresponding":false,"name":"Matthew Kay"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1422","image_caption":"Overview of this paper: developing the VILA pipeline, evaluating the candidate bank, and demonstrating a potential application\u2014 the new VILA-VLAT visualization literacy test.","keywords":["Visualization Items, Large Language Models, Visualization Literacy Assessment"],"open_access_supplemental_link":"https://osf.io/ysrhq/","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/ysrhq/","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/W3Vrrxo2w74&t=0h36m7s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1422/v-full-1422_Preview.mp4?token=yDyFYRJ4pisyltZsmx0eDNmY6u8zITqwq3wrjK1BCnU&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1422/v-full-1422_Preview.srt?token=4IZvyXEtEwm3VyE5Dn48x1WT8TKkCOZep1VTeEXEFMo&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full28","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Human and Machine Visualization Literacy","session_uid":"v-full","session_youtube_ff_id":"dA4Z80m5Rzs","session_youtube_ff_link":"https://youtu.be/dA4Z80m5Rzs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/W3Vrrxo2w74&t=0h36m7s","sessions":["Human and Machine Visualization Literacy"],"time_stamp":"2024-10-18T13:06:00Z","title":"Promises and Pitfalls: 
Using Large Language Models to Generate Visualization Items","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1738","abstract":" As a step towards improving visualization literacy, this work investigates how students approach reading visualizations differently after taking a university-level visualization course. We asked students to verbally walk through their process of making sense of unfamiliar visualizations, and conducted a qualitative analysis of these walkthroughs. Our qualitative analysis found that after taking a visualization course, students engaged with visualizations in more sophisticated ways: they were more likely to exhibit design empathy by thinking critically about the tradeoffs behind why a chart was designed in a particular way, and were better able to deconstruct a chart to make sense of it. We also gave students a quantitative assessment of visualization literacy and found no evidence of scores improving after the class, likely because the test we used focused on a different set of skills than those emphasized in visualization classes. While current measurement instruments for visualization literacy are useful, we propose developing standardized assessments for additional aspects of visualization literacy, such as deconstruction and design empathy. We also suggest that these additional aspects could be incorporated more explicitly in visualization courses. All supplemental materials are available at https://osf.io/w5pum/.","accessible_pdf":false,"authors":[{"affiliations":["Northwestern University, Evanston, United States"],"email":"maryam.hedayati@u.northwestern.edu","is_corresponding":true,"name":"Maryam Hedayati"},{"affiliations":["Northwestern University, Chicago, United States"],"email":"matthew.kay@gmail.com","is_corresponding":false,"name":"Matthew Kay"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1738","image_caption":"Participants were randomly assigned to one of two groups. During each study session, they completed the VLAT and a walkthrough of two unfamiliar visualizations. The visualizations they saw in each session were determined by the group they were assigned to. 
","keywords":["visualization literacy, visualization pedagogy, graph comprehension, visualization expertise"],"open_access_supplemental_link":"https://osf.io/w5pum/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/kg3am","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/W3Vrrxo2w74&t=0h12m19s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1738/v-full-1738_Preview.mp4?token=HOTx4l7qDky3PAIN5UrGhIlD6SAGmJngXtES3wC48p8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1738/v-full-1738_Preview.srt?token=EDSSAynuvqm7nWks6RBza0id9hZnB1TcnMXF4CUr2f8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full28","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Human and Machine Visualization Literacy","session_uid":"v-full","session_youtube_ff_id":"j5kScTwQeNk","session_youtube_ff_link":"https://youtu.be/j5kScTwQeNk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/W3Vrrxo2w74&t=0h12m19s","sessions":["Human and Machine Visualization Literacy"],"time_stamp":"2024-10-18T12:42:00Z","title":"What University Students Learn In Visualization Classes","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243413195","abstract":"With the growing complexity and volume of data, visualizations have become more intricate, often requiring advanced techniques to convey insights. These complex charts are prevalent in everyday life, and individuals who lack knowledge in data visualization may find them challenging to understand. This paper investigates using Large Language Models (LLMs) to help users with low data literacy understand complex visualizations. While previous studies focus on text interactions with users, we noticed that visual cues are also critical for interpreting charts. We introduce an LLM application that supports both text and visual interaction for guiding chart interpretation. Our study with 26 participants revealed that the in-situ support effectively assisted users in interpreting charts and enhanced learning by addressing specific chart-related questions and encouraging further exploration. Visual communication allowed participants to convey their interests straightforwardly, eliminating the need for textual descriptions. However, the LLM assistance led users to engage less with the system, resulting in fewer insights from the visualizations. This suggests that users, particularly those with lower data literacy and motivation, may have over-relied on the LLM agent. 
We discuss opportunities for deploying LLMs to enhance visualization literacy while emphasizing the need for a balanced approach.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Kiroong Choe"},{"affiliations":"","email":"","is_corresponding":false,"name":"Chaerin Lee"},{"affiliations":"","email":"","is_corresponding":false,"name":"Soohyun Lee"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jiwon Song"},{"affiliations":"","email":"","is_corresponding":false,"name":"Aeri Cho"},{"affiliations":"","email":"","is_corresponding":false,"name":"Nam Wook Kim"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jinwook Seo"}],"award":"","doi":"10.1109/TVCG.2024.3413195","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243413195","image_caption":"Our system allows users to interact with charts using both text and visual inputs. Users can ask questions or share visualizations, and the system will provide the current chart annotations to the LLM agent. The agent can then propose new annotations and suggest follow-up questions for deeper analysis.","keywords":["Visualization literacy, Large language model, Visual communication"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/W3Vrrxo2w74&t=0h0m45s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243413195/v-tvcg-20243413195_Preview.mp4?token=VRK1LBdTylEoAdQv7uQncr9J8VSmYIY9CjKfByD6vto&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243413195/v-tvcg-20243413195_Preview.srt?token=EeqHuoJJEOM3CmdAysIVV8s6o8tsaqPqAnOTVWdOZus&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full28","session_room":"Bayshore I + II + III","session_room_id":"bayshoreplenary","session_title":"Human and Machine Visualization Literacy","session_uid":"v-tvcg","session_youtube_ff_id":"oF7pAKfnhxo","session_youtube_ff_link":"https://youtu.be/oF7pAKfnhxo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/W3Vrrxo2w74&t=0h0m45s","sessions":["Human and Machine Visualization Literacy"],"time_stamp":"2024-10-18T12:30:00Z","title":"Enhancing Data Literacy On-demand: LLMs as Guides for Novices in Chart Interpretation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1140","abstract":"Written language is a useful tool for non-visual creative activities like composing essays and planning searches. This paper investigates the integration of written language into the visualization design process. We create the idea of a 'writing rudder,' which acts as a guiding force or strategy for the designer. Via an interview study of 24 working visualization designers, we first established that only a minority of participants systematically use writing to aid in design. A second study with 15 visualization designers examined four different variants of written rudders: asking questions, stating conclusions, composing a narrative, and writing titles. 
Overall, participants had a positive reaction; designers recognized the benefits of explicitly writing down components of the design and indicated that they would use this approach in future design work. More specifically, two approaches - writing questions and writing conclusions/takeaways - were seen as beneficial across the design process, while writing narratives showed promise mainly for the creation stage. Although concerns around potential bias during data exploration were raised, participants also discussed strategies to mitigate such concerns. This paper contributes to a deeper understanding of the interplay between language and visualization, and proposes a straightforward, lightweight addition to the visualization design process.","accessible_pdf":false,"authors":[{"affiliations":["UC Berkeley, Berkeley, United States"],"email":"chase_stokes@berkeley.edu","is_corresponding":true,"name":"Chase Stokes"},{"affiliations":["Self, Berkeley, United States"],"email":"clarahu@berkeley.edu","is_corresponding":false,"name":"Clara Hu"},{"affiliations":["UC Berkeley, Berkeley, United States"],"email":"hearst@berkeley.edu","is_corresponding":false,"name":"Marti Hearst"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1140","image_caption":"Main findings from two interview studies. Right: number of participants who currently use writing in visualization design, and with what frequency, in each design step. Both Study 1 and Study 2 found that visualization designers rarely use writing as a concrete design step. Left: Four types of writing rudders tested in Study 2, participants' ratings of each type, and examples of participant-written rudders. ","keywords":["Visualization, design, language, text"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.15959","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/55Jz0Cdvl1k&t=0h0m47s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1140/v-full-1140_Preview.mp4?token=Oa5acEgF9yK70H6UaYyGjVl90iRXE9dQ_dGCjqjQ3x0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1140/v-full-1140_Preview.srt?token=m7laA4Mo72JmMkAMFTbu76ZbzbeERu_2BtU6v4TUddU&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full29","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Design Methods","session_uid":"v-full","session_youtube_ff_id":"ciCUI2ju3tM","session_youtube_ff_link":"https://youtu.be/ciCUI2ju3tM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/55Jz0Cdvl1k&t=0h0m47s","sessions":["Visualization Design Methods"],"time_stamp":"2024-10-17T16:00:00Z","title":"\"It's a Good Idea to Put It Into Words\": Writing 'Rudders' in the Initial Stages of Visualization Design","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1342","abstract":"Genomics experts rely on visualization to extract and share insights from complex and large-scale datasets. Beyond off-the-shelf tools for data exploration, there is an increasing need for platforms that aid experts in authoring customized visualizations for both exploration and communication of insights. 
A variety of interactive techniques have been proposed for authoring data visualizations, such as template editing, shelf configuration, natural language input, and code editors. However, it remains unclear how genomics experts create visualizations and which techniques best support their visualization tasks and needs. To address this gap, we conducted two user studies with genomics researchers: (1) semi-structured interviews (n=20) to identify the tasks, user contexts, and current visualization authoring techniques and (2) an exploratory study (n=13) using visual probes to elicit users\u2019 intents and desired techniques when creating visualizations. Our contributions include (1) a characterization of how visualization authoring is currently utilized in genomics visualization, identifying limitations and benefits in light of common criteria for authoring tools, and (2) generalizable design implications for genomics visualization authoring tools based on our findings on task- and user-specific usefulness of authoring techniques. All supplemental materials are available at https://osf.io/bdj4v/.","accessible_pdf":false,"authors":[{"affiliations":["Eindhoven University of Technology, Eindhoven, Netherlands"],"email":"a.v.d.brandt@tue.nl","is_corresponding":true,"name":"Astrid van den Brandt"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"sehi_lyi@hms.harvard.edu","is_corresponding":false,"name":"Sehi L'Yi"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"huyen_nguyen@hms.harvard.edu","is_corresponding":false,"name":"Huyen N. Nguyen"},{"affiliations":["Eindhoven University of Technology, Eindhoven, Netherlands"],"email":"a.vilanova@tue.nl","is_corresponding":false,"name":"Anna Vilanova"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"nils@hms.harvard.edu","is_corresponding":false,"name":"Nils Gehlenborg"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1342","image_caption":"Composite illustration summarizing key results from the two user studies. In Study 1 (n=20), we identified five personas based on interviews, characterized by three dimensions: focus, automation, and audience. 
In Study 2 (n=13), we collected user preferences across eight tasks (T1--T8) for six common authoring techniques: code-based, example-based, natural language input (NLI), shelf configuration, template-based, and visualization-by-demonstration (VbD).","keywords":["User interviews, visual probes, visualization authoring, genomics data visualization"],"open_access_supplemental_link":"https://osf.io/bdj4v/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/6f42j","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/55Jz0Cdvl1k&t=1h4m54s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1342/v-full-1342_Preview.mp4?token=rhgoNF1TgG9XLA_Pd4l2NUgw86TcNAfNZPBjnPYalgE&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full29","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Design Methods","session_uid":"v-full","session_youtube_ff_id":"Tw14XEoGMAk","session_youtube_ff_link":"https://youtu.be/Tw14XEoGMAk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/55Jz0Cdvl1k&t=1h4m54s","sessions":["Visualization Design Methods"],"time_stamp":"2024-10-17T17:00:00Z","title":"Understanding Visualization Authoring Techniques for Genomics Data in the Context of Personas and Tasks","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1393","abstract":"This paper discusses challenges and design strategies in responsive design for thematic maps in information visualization. Thematic maps pose a number of unique challenges for responsiveness, such as inflexible aspect ratios that do not easily adapt to varying screen dimensions, or densely clustered visual elements in urban areas becoming illegible at smaller scales. However, design guidance on how to best address these issues is currently lacking. We conducted design sessions with eight professional designers and developers of web-based thematic maps for information visualization. Participants were asked to redesign a given map for various screen sizes and aspect ratios and to describe their reasoning for when and how they adapted the design. We report general observations of practitioners\u2019 motivations, decision-making processes, and personal design frameworks. We then derive seven challenges commonly encountered in responsive maps, and 17 strategies to address them, such as repositioning elements, segmenting the map, or using alternative visualizations. We compile these challenges and strategies into an illustrated cheat sheet targeted at anyone designing or learning to design responsive maps. The cheat sheet is available online: responsive-vis.github.io/map-cheat-sheet. 
","accessible_pdf":false,"authors":[{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"sarah.schoettler@ed.ac.uk","is_corresponding":true,"name":"Sarah Sch\u00f6ttler"},{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"uhinrich@ed.ac.uk","is_corresponding":false,"name":"Uta Hinrichs"},{"affiliations":["Inria, Bordeaux, France","University of Edinburgh, Edinburgh, United Kingdom"],"email":"bbach@inf.ed.ac.uk","is_corresponding":false,"name":"Benjamin Bach"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1393","image_caption":"Challenges and design solutions for responsive thematic mapping. On the left, seven common challenges in responsive thematic maps, such as areas and symbols being too small or overlapping, are displayed. On the right, 17 possible design solutions are displayed, for example replacing the legend with annotations, separating the map into segments, or scrolling the map.","keywords":["information visualization, responsive visualization, thematic map design"],"open_access_supplemental_link":"https://responsive-vis.github.io/map-cheat-sheet/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.20735","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/55Jz0Cdvl1k&t=0h27m18s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1393/v-full-1393_Preview.mp4?token=lGAUq2xdhP4kfeEG8qYsYRxSmjHdL97R3mzVkkDkGVI&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full29","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Design Methods","session_uid":"v-full","session_youtube_ff_id":"mGAIwYY0AN4","session_youtube_ff_link":"https://youtu.be/mGAIwYY0AN4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/55Jz0Cdvl1k&t=0h27m18s","sessions":["Visualization Design Methods"],"time_stamp":"2024-10-17T16:24:00Z","title":"Practices and Strategies in Responsive Thematic Map Design: A Report from Design Workshops with Experts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1414","abstract":"Visualization designers (e.g., journalists or data analysts) often rely on examples to explore the space of possible designs, yet we have little insight into how examples shape data visualization design outcomes. While the effects of examples have been studied in other disciplines, such as web design or engineering, the results are not readily applicable to visualization due to inconsistencies in findings and challenges unique to visualization design. Towards bridging this gap, we conduct an exploratory experiment involving 32 data visualization designers focusing on the influence of five factors (timing, quantity, diversity, data topic similarity, and data schema similarity) on objectively measurable design outcomes (e.g., numbers of designs and idea transfers). Our quantitative analysis shows that when examples are introduced after initial brainstorming, designers curate examples with topics less similar to the dataset they are working on and produce more designs with a high variation in visualization components. 
Also, designers copy more ideas from examples with higher data schema similarities. Our qualitative analysis of participants\u2019 thought processes provides insights into why designers incorporate examples into their designs, revealing potential factors that have not been previously investigated. Finally, we discuss how our results inform how designers may use examples during design ideation as well as future research on quantifying designs and supporting example-based visualization design. All supplemental materials are available in our OSF repo.","accessible_pdf":true,"authors":[{"affiliations":["University of Maryland, College Park, United States"],"email":"hbako@umd.edu","is_corresponding":true,"name":"Hannah K. Bako"},{"affiliations":["The University of Texas at Austin, Austin, United States"],"email":"xinyi.liu@utexas.edu","is_corresponding":false,"name":"Xinyi Liu"},{"affiliations":["University of Maryland, College Park, United States"],"email":"gko1@terpmail.umd.edu","is_corresponding":false,"name":"Grace Ko"},{"affiliations":["Human Data Interaction Lab, College Park, United States"],"email":"hsong02@cs.umd.edu","is_corresponding":false,"name":"Hyemi Song"},{"affiliations":["University of Washington, Seattle, United States"],"email":"leibatt@cs.washington.edu","is_corresponding":false,"name":"Leilani Battle"},{"affiliations":["University of Maryland, College Park, United States"],"email":"leozcliu@umd.edu","is_corresponding":false,"name":"Zhicheng Liu"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1414","image_caption":"The image outlines an exploratory study investigating how the timing and properties of examples influence visualization design outcomes, highlighting key stages from task introduction to final design selection.","keywords":["data visualization, design, examples"],"open_access_supplemental_link":"https://osf.io/sbp2k/wiki/home/","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/55Jz0Cdvl1k&t=0h13m26s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1414/v-full-1414_Preview.mp4?token=F_WE7OeLcasWgVwnGc5hrSvn98z1rvAmcvHZCJkobfA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1414/v-full-1414_Preview.srt?token=i8BLTU3xi6RHAD-FotoO67SChOFvRbcLw_ls9ARIPRM&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full29","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Design Methods","session_uid":"v-full","session_youtube_ff_id":"6Nh--7IK6fw","session_youtube_ff_link":"https://youtu.be/6Nh--7IK6fw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/55Jz0Cdvl1k&t=0h13m26s","sessions":["Visualization Design Methods"],"time_stamp":"2024-10-17T16:12:00Z","title":"Unveiling How Examples Shape Data Visualization Design Outcomes","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1613","abstract":"We present a path-based design model and system for designing and creating visualisations. Our model represents a systematic approach to constructing visual representations of data or concepts following a predefined sequence of steps. 
The initial step involves outlining the overall appearance of the visualisation by creating a skeleton structure, referred to as a flowpath. Subsequently, we specify objects, visual marks, properties, and appearance, storing them in a gene. Lastly, we map data onto the flowpath, ensuring suitable morphisms. Alternative designs are created by exchanging values in the gene. For example, designs that share similar traits, are created by making small incremental changes to the gene. Our design methodology fosters the generation of diverse creative concepts, space-filling visualisations, and traditional formats like bar charts, circular plots and pie charts. Through our implementation we showcase the model in action. As an example application, we integrate the output visualisations onto a smartwatch and visualisation dashboards. In this article we (1) introduce, define and explain the path model and discuss possibilities for its use, (2) present our implementation, results, and evaluation, and (3) demonstrate and evaluate an application of its use on a mobile watch.","accessible_pdf":true,"authors":[{"affiliations":["ExaDev, Gaerwen, United Kingdom","Bangor University, Bangor, United Kingdom"],"email":"james.ogge@gmail.com","is_corresponding":false,"name":"James R Jackson"},{"affiliations":["Bangor University, Bangor, United Kingdom"],"email":"p.ritsos@bangor.ac.uk","is_corresponding":false,"name":"Panagiotis D. Ritsos"},{"affiliations":["Bangor University, Bangor, United Kingdom"],"email":"p.butcher@bangor.ac.uk","is_corresponding":false,"name":"Peter W. S. Butcher"},{"affiliations":["Bangor University, Bangor, United Kingdom"],"email":"j.c.roberts@bangor.ac.uk","is_corresponding":true,"name":"Jonathan C Roberts"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1613","image_caption":"We present a path-based design model and system for designing and creating visualisations. The image shows the Genii visualisation designer tool which demonstrates our flowpath model. Individuals define their own path or choose predefined flowpaths (left panel), drag and drop the visualisation properties into the gene panel (middle), which are rendered onto the gallery (right). Users can either create a new gene which adds a new image to the gallery or edit parameters (through drag and drop) to adapt current visualisations. Crafted visualisations can be exported and used in other applications. 
","keywords":["Path-based design, Visualisation Design, Alternative Visualisations"],"open_access_supplemental_link":"https://jamesjacko.github.io/genii/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.03681","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/55Jz0Cdvl1k&t=0h40m35s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1613/v-full-1613_Preview.mp4?token=Uhv0T5i2byoQIGD44Vxa9Jcd8h91FAHp0C8U8pLpvB4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1613/v-full-1613_Preview.srt?token=uwWiwrUqb19_M2oz4wx8ypLVaNjCs-_QJ7yLjzZdpkI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full29","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Design Methods","session_uid":"v-full","session_youtube_ff_id":"4GP7AtRD2y4","session_youtube_ff_link":"https://youtu.be/4GP7AtRD2y4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/55Jz0Cdvl1k&t=0h40m35s","sessions":["Visualization Design Methods"],"time_stamp":"2024-10-17T16:36:00Z","title":"Path-based Design Model for Constructing and Exploring Alternative Visualisations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1726","abstract":"User experience in data visualization is typically assessed through post-viewing self-reports, but these overlook the dynamic cognitive processes during interaction. This study explores the use of mind wandering- a phenomenon where attention spontaneously shifts from a primary task to internal, task-related thoughts or unrelated distractions- as a dynamic measure during visualization exploration. Participants reported mind wandering while viewing visualizations from a pre-labeled visualization database and then provided quantitative ratings of trust, engagement, and design quality, along with qualitative descriptions and short-term/long-term recall assessments. Results show that mind wandering negatively affects short-term visualization recall and various post-viewing measures, particularly for visualizations with little text annotation. Further, the type of mind wandering impacts engagement and emotional response. Mind wandering also functions as an intermediate process linking visualization design elements topost-viewing measures, influencing how viewers engage with and interpret visual information over time. Overall, this research underscores the importance of incorporating mind wandering as a dynamic measure in visualization design and evaluation, offering novel avenues for enhancing user engagement and comprehension.","accessible_pdf":true,"authors":[{"affiliations":["Arizona State University, Tempe, United States"],"email":"aarunku5@asu.edu","is_corresponding":true,"name":"Anjana Arunkumar"},{"affiliations":["Northeastern University, Boston, United States"],"email":"l.padilla@northeastern.edu","is_corresponding":false,"name":"Lace M. 
Padilla"},{"affiliations":["Arizona State University, Tempe, United States"],"email":"cbryan16@asu.edu","is_corresponding":false,"name":"Chris Bryan"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1726","image_caption":"While consuming data visualizations, the mind may wander, exploring diverse ideas, questions, and connections. Viewers may venture opinions on appearance and convention, report visual patterns and trends, integrate external knowledge, or engage in unrelated thoughts. Where does your mind wander and why does it matter?","keywords":["Visualization, Mind Wandering, Cognition, Engagement, Recall"],"open_access_supplemental_link":"https://osf.io/h5awt/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.03576","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/55Jz0Cdvl1k&t=0h52m2s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1726/v-full-1726_Preview.mp4?token=qLuJG8ZPdGGFSTym-fu41zzoGoT8LM1yYZ6-jU99qiA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1726/v-full-1726_Preview.srt?token=yZe83VVDNgBVAT_iHg_uFKWEfNw8cviPFjJviitcPms&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full29","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visualization Design Methods","session_uid":"v-full","session_youtube_ff_id":"WuNz1VKzPLY","session_youtube_ff_link":"https://youtu.be/WuNz1VKzPLY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/55Jz0Cdvl1k&t=0h52m2s","sessions":["Visualization Design Methods"],"time_stamp":"2024-10-17T16:48:00Z","title":"Mind Drifts, Data Shifts: Utilizing Mind Wandering to Track the Evolution of User Experience with Data Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1277","abstract":"This paper presents a novel end-to-end framework for closed-form computation and visualization of critical point uncertainty in 2D uncertain scalar fields. Critical points are fundamental topological descriptors used in the visualization and analysis of scalar fields. The uncertainty inherent in data (e.g., observational and experimental data, approximations in simulations, and compression), however, creates uncertainty regarding critical point positions. Uncertainty in critical point positions, therefore, cannot be ignored, given their impact on downstream data analysis tasks. In this work, we study uncertainty in critical points as a function of uncertainty in data modeled with probability distributions. Although Monte Carlo (MC) sampling techniques have been used in prior studies to quantify critical point uncertainty, they are often expensive and are infrequently used in production-quality visualization software. We, therefore, propose a new end-to-end framework to address these challenges that comprises a threefold contribution. First, we derive the critical point uncertainty in closed form, which is more accurate and efficient than the conventional MC sampling methods. 
Specifically, we provide the closed-form and semianalytical (a mix of closed-form and MC methods) solutions for parametric (e.g., uniform, Epanechnikov) and nonparametric models (e.g., histograms) with finite support. Second, we accelerate critical point probability computations using a parallel implementation with the VTK-m library, which is platform portable. Finally, we demonstrate the integration of our implementation with the ParaView software system to demonstrate near-real-time results for real datasets.","accessible_pdf":false,"authors":[{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"tushar.athawale@gmail.com","is_corresponding":true,"name":"Tushar M. Athawale"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"wangz@ornl.gov","is_corresponding":false,"name":"Zhe Wang"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"pugmire@ornl.gov","is_corresponding":false,"name":"David Pugmire"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"kmorel@acm.org","is_corresponding":false,"name":"Kenneth Moreland"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"gongq@ornl.gov","is_corresponding":false,"name":"Qian Gong"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"klasky@ornl.gov","is_corresponding":false,"name":"Scott Klasky"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"crj@sci.utah.edu","is_corresponding":false,"name":"Chris R. Johnson"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"paul.rosen@utah.edu","is_corresponding":false,"name":"Paul Rosen"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1277","image_caption":"Critical point visualization for the climate dataset. (a) Critical points of the original data are visualized with blue spheres. (b) Noise in the data creates new critical points for which no uncertainty is visualized. (c) Critical point uncertainty is computed and visualized through elevation proportional to critical point probability. 
Our closed-form solutions implemented with the VTK-m library provide a 1646x speed-up compared to the conventional approach.","keywords":["Topology, uncertainty, critical points, probabilistic analysis"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.18015v1","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/j1W-7oN5gGk&t=0h34m51s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1277/v-full-1277_Preview.mp4?token=FXTjHbPuB0h34s6PnN3eMIszxbIPGY02V9mpQFmdpQM&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1277/v-full-1277_Preview.srt?token=XVFHuLXTdhSfIDqag5JM-FL8pQ-q7ZcB-hfwVDjcHpw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Flow, Topology, and Uncertainty","session_uid":"v-full","session_youtube_ff_id":"kaB1IpYiCCU","session_youtube_ff_link":"https://youtu.be/kaB1IpYiCCU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/j1W-7oN5gGk&t=0h34m51s","sessions":["Flow, Topology, and Uncertainty"],"time_stamp":"2024-10-18T13:06:00Z","title":"Uncertainty Visualization of Critical Points of 2D Scalar Fields for Parametric and Nonparametric Probabilistic Models","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1461","abstract":"This paper presents a practical approach for the optimization of topological simplification, a central pre-processing step for the analysis and visualization of scalar data. Given an input scalar field f and a set of \u201csignal\u201d persistence pairs to maintain, our approaches produces an output field g that is close to f and which optimizes (i) the cancellation of \u201cnon-signal\u201d pairs, while (ii) preserving the \u201csignal\u201d pairs. In contrast to pre-existing simplification approaches, our method is not restricted to persistence pairs involving extrema and can thus address a larger class of topological features, in particular saddle pairs in three-dimensional scalar data. Our approach leverages recent generic persistence optimization frameworks and extends them with tailored accelerations specific to the problem of topological simplification. Extensive experiments report substantial accelerations over these frameworks, thereby making topological simplification optimization practical for real-life datasets. Our work enables a direct visualization and analysis of the topologically simplified data, e.g., via isosurfaces of simplified topology (fewer components and handles). We apply our approach to the extraction of prominent filament structures in three-dimensional data. Specifically, we show that our pre-simplification of the data leads to practical improvements over standard topological techniques for removing filament loops. We also show how our framework can be used to repair genus defects in surface processing. 
Finally, we provide a C++ implementation for reproducibility purposes.","accessible_pdf":false,"authors":[{"affiliations":["CNRS, Paris, France","SORBONNE UNIVERSITE, Paris, France"],"email":"mohamed.kissi@lip6.fr","is_corresponding":true,"name":"Mohamed KISSI"},{"affiliations":["CNRS, Paris, France","Sorbonne Universit\u00e9, Paris, France"],"email":"mathieu.pont@lip6.fr","is_corresponding":false,"name":"Mathieu Pont"},{"affiliations":["University of Arizona, Tucson, United States"],"email":"josh@cs.arizona.edu","is_corresponding":false,"name":"Joshua A Levine"},{"affiliations":["CNRS, Paris, France","Sorbonne Universit\u00e9, Paris, France"],"email":"julien.tierny@sorbonne-universite.fr","is_corresponding":false,"name":"Julien Tierny"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1461","image_caption":"Topological simplification of a dark matter density field in a cosmology dataset. The cosmic web geometry is depicted by an isosurface at isovalue 0.4, with core filament structures extracted via upward discrete integral lines from 2-saddles above 0.4. Our approach reduced the number of undesired topological features by 92%, leading to a less cluttered visualization. This simplifies the topology, removing noisy components and small-scale handles, as shown in the inset zooms. This also results in fewer skips in persistent saddle connector reversals, revealing the primary filament structure more clearly.","keywords":["Topological Data Analysis, scalar data, simplification, feature extraction."],"open_access_supplemental_link":"https://github.com/MohamedKISSI/Code-Paper-A-Pratical-Solver-for-Scalar-Data-Topological-Simplification","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"http://arxiv.org/abs/2407.12399","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/j1W-7oN5gGk&t=0h25m4s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1461/v-full-1461_Preview.mp4?token=8hD_BYjbZdeNNM6x4XqOfZp21w509Z-xWOKfOVEKzT0&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Flow, Topology, and Uncertainty","session_uid":"v-full","session_youtube_ff_id":"PJ_tDek0d88","session_youtube_ff_link":"https://youtu.be/PJ_tDek0d88","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/j1W-7oN5gGk&t=0h25m4s","sessions":["Flow, Topology, and Uncertainty"],"time_stamp":"2024-10-18T12:54:00Z","title":"A Practical Solver for Scalar Data Topological Simplification","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1494","abstract":"Topological abstractions offer a method to summarize the behavior of vector fields, but computing them robustly can be challenging due to numerical precision issues. One alternative is to represent the vector field using a discrete approach, which constructs a collection of pairs of simplices in the input mesh that satisfies criteria introduced by Forman\u2019s discrete Morse theory. 
While numerous approaches exist to compute pairs in the restricted case of the gradient of a scalar field, state-of-the-art algorithms for the general case of vector fields require expensive optimization procedures. This paper introduces a fast, novel approach for pairing simplices of two-dimensional, triangulated vector fields that do not vary in time. The key insight of our approach is that we can employ a local evaluation, inspired by the approach used to construct a discrete gradient field, where every simplex in a mesh is considered by no more than one of its vertices. Specifically, we observe that for any edge in the input mesh, we can uniquely assign an outward direction of flow. We can further expand this consistent notion of outward flow at each vertex, which corresponds to the concept of a downhill flow in the case of scalar fields. Working with outward flow enables a linear-time algorithm that processes the (outward) neighborhoods of each vertex one-by-one, similar to the approach used for scalar fields. We couple our approach to constructing discrete vector fields with a method to extract, simplify, and visualize topological features. Empirical results on analytic and simulation data demonstrate drastic improvements in running time, produce features similar to the current state-of-the-art, and show the application of simplification to large, complex flows","accessible_pdf":false,"authors":[{"affiliations":["University of Arizona, Tucson, United States"],"email":"finkent@arizona.edu","is_corresponding":true,"name":"Tanner Finken"},{"affiliations":["Sorbonne Universit\u00e9, Paris, France"],"email":"julien.tierny@sorbonne-universite.fr","is_corresponding":false,"name":"Julien Tierny"},{"affiliations":["University of Arizona, Tucson, United States"],"email":"josh@cs.arizona.edu","is_corresponding":false,"name":"Joshua A Levine"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1494","image_caption":"We extract and simplify a vector field of ocean currents using our technique. The input mesh has over 48 million simplices, and the original flow results in over 65000 critical points. We simplify to approximately 2000 critical points using a discrete representation of the field. 
Computing the original field for a domain this big takes only 4 minutes and computing complete simplification takes approximately 10 minutes.","keywords":["Flow visualization, discrete Morse theory, topological data analysis"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"http://arxiv.org/abs/2408.04769","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/j1W-7oN5gGk&t=0h13m20s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1494/v-full-1494_Preview.mp4?token=usx194xYVR8-djPko3ohF_0PYny_K5RbH2x0Fqx0ZFw&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Flow, Topology, and Uncertainty","session_uid":"v-full","session_youtube_ff_id":"OzB9wNzCmRc","session_youtube_ff_link":"https://youtu.be/OzB9wNzCmRc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/j1W-7oN5gGk&t=0h13m20s","sessions":["Flow, Topology, and Uncertainty"],"time_stamp":"2024-10-18T12:42:00Z","title":"Localized Evaluation for Constructing Discrete Vector Fields","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1574","abstract":"The numerical extraction of vortex cores from time-dependent fluid flow attracted much attention over the past decades. A commonly agreed upon vortex definition remained elusive since a proper vortex core needs to satisfy two hard constraints: it must be objective and Lagrangian. Recent methods on objectivization met the first but not the second constraint, since there was no formal guarantee that the resulting vortex coreline is indeed a pathline of the fluid flow. In this paper, we propose the first vortex core definition that is both objective and Lagrangian. Our approach restricts observer motions to follow along pathlines, which reduces the degrees of freedoms: we only need to optimize for an observer rotation that makes the observed flow as steady as possible. This optimization succeeds along Lagrangian vortex corelines and will result in a non-zero time-partial everywhere else. By performing this optimization at each point of a spatial grid, we obtain a residual scalar field, which we call vortex deviation error. The local minima on the grid serve as seed points for a gradient descent optimization that delivers sub-voxel accurate corelines. The visualization of both 2D and 3D vortex cores is based on the separation of the movement of the vortex core and the swirling flow behavior around it. While the vortex core is represented by a pathline, the swirling motion around it is visualized by streamlines in the correct frame. 
We demonstrate the utility of the approach on several 2D and 3D time-dependent vector fields.","accessible_pdf":false,"authors":[{"affiliations":["Friedrich-Alexander-University Erlangen-N\u00fcrnberg, Erlangen, Germany"],"email":"tobias.guenther@fau.de","is_corresponding":true,"name":"Tobias G\u00fcnther"},{"affiliations":["University of Magdeburg, Magdeburg, Germany"],"email":"theisel@ovgu.de","is_corresponding":false,"name":"Holger Theisel"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1574","image_caption":"In this paper, we present the first finite-time approach that extracts objective vortex corelines, which are guaranteed to be pathlines of the underlying flow. Our key idea is to restrict the motion of the observer to always follow along particle trajectories, which incidentally also reduces the degrees of freedom in the reference frame optimization. We derive the method for 2D and 3D time-dependent flow.","keywords":["Flow visualization, vortices, objective methods"],"open_access_supplemental_link":"https://doi.org/10.5281/zenodo.12750719","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/j1W-7oN5gGk&t=0h0m28s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1574/v-full-1574_Preview.mp4?token=1dzHZmqY0ScY43iHcq0r_TadN7TuEBg0fKQ4q1NmftE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1574/v-full-1574_Preview.srt?token=fVx8Bc3PS0li54gRmGAIQ0fnF-lBMxgiFhzZIe4XS2M&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Flow, Topology, and Uncertainty","session_uid":"v-full","session_youtube_ff_id":"uzDwMGgfoLE","session_youtube_ff_link":"https://youtu.be/uzDwMGgfoLE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/j1W-7oN5gGk&t=0h0m28s","sessions":["Flow, Topology, and Uncertainty"],"time_stamp":"2024-10-18T12:30:00Z","title":"Objective Lagrangian Vortex Cores and their Visual Representations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1163","abstract":"Integral curves have been widely used to represent and analyze various vector fields. In this paper, we propose a Curve Segment Neighborhood Graph (CSNG) to capture the relationships between neighboring curve segments. This graph representation enables us to adapt the fast community detection algorithm, i.e., the Louvain algorithm, to identify individual graph communities from CSNG. Our results show that these communities often correspond to the features of the flow. To achieve a multi-level interactive exploration of the detected communities, we adapt a force-directed layout that allows users to refine and re-group communities based on their domain knowledge. 
We incorporate the proposed techniques into an interactive system to enable effective analysis and interpretation of complex patterns in large-scale integral curve datasets.","accessible_pdf":false,"authors":[{"affiliations":["University of Houston, Houston, United States"],"email":"nguyenpkk95@gmail.com","is_corresponding":true,"name":"Nguyen K Phan"},{"affiliations":["University of Houston, Houston, United States"],"email":"chengu@cs.uh.edu","is_corresponding":false,"name":"Guoning Chen"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1163","image_caption":"A visualization of the (1) 3D streamlines dataset of the Solar Plume dataset on the left side, color-coded by their respective communities and (2) the community force-directed graph created using Louvain community detection at resolution = 0.7 on the right side.","keywords":["Vector field, neighbor search, community detection"],"open_access_supplemental_link":"https://github.com/MangoLion/CSN_VIS","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/j1W-7oN5gGk&t=1h2m16s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1163/v-short-1163_Preview.mp4?token=y8Q6ujlvBsJmxc_2_KRHXnVLngAMr1C9sm2ecSiBoS8&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1163/v-short-1163_Preview.srt?token=DaGRuL20pAfTZbKmqrGbCQYb42f72kTxmCOSBU7UtZk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Flow, Topology, and Uncertainty","session_uid":"v-short","session_youtube_ff_id":"5HB_dbyxo_Q","session_youtube_ff_link":"https://youtu.be/5HB_dbyxo_Q","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/j1W-7oN5gGk&t=1h2m16s","sessions":["Flow, Topology, and Uncertainty"],"time_stamp":"2024-10-18T13:30:00Z","title":"Curve Segment Neighborhood-based Vector Field Exploration","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243350076","abstract":"Ensembles of contours arise in various applications like simulation, computer-aided design, and semantic segmentation. Uncovering ensemble patterns and analyzing individual members is a challenging task that suffers from clutter. Ensemble statistical summarization can alleviate this issue by permitting analyzing ensembles' distributional components like the mean and median, confidence intervals, and outliers. Contour boxplots, powered by Contour Band Depth (CBD), are a popular non-parametric ensemble summarization method that benefits from CBD's generality, robustness, and theoretical properties. In this work, we introduce Inclusion Depth (ID), a new notion of contour depth with three defining characteristics. First, ID is a generalization of functional Half-Region Depth, which offers several theoretical guarantees. Second, ID relies on a simple principle: the inside/outside relationships between contours. This facilitates implementing ID and understanding its results. Third, the computational complexity of ID scales quadratically in the number of members of the ensemble, improving CBD's cubic complexity. 
This also in practice speeds up the computation enabling the use of ID for exploring large contour ensembles or in contexts requiring multiple depth evaluations like clustering. In a series of experiments on synthetic data and case studies with meteorological and segmentation data, we evaluate ID's performance and demonstrate its capabilities for the visual analysis of contour ensembles.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Nicolas F. Chaves-de-Plaza"},{"affiliations":"","email":"","is_corresponding":false,"name":"Prerak Mody"},{"affiliations":"","email":"","is_corresponding":false,"name":"Marius Staring"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ren\u00e9 van Egmond"},{"affiliations":"","email":"","is_corresponding":false,"name":"Anna Vilanova"},{"affiliations":"","email":"","is_corresponding":false,"name":"Klaus Hildebrandt"}],"award":"","doi":"10.1109/TVCG.2024.3350076","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243350076","image_caption":"Inclusion Depth is a new contour depth notion that uses inside/outside relationships between contours to compute their depth significantly faster than existing methods like Contour Band Depth. Use the QR code to explore the Contour Depth Python library!","keywords":["Uncertainty visualization, contours, ensemble summarization, depth statistics."],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/j1W-7oN5gGk&t=0h49m11s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243350076/v-tvcg-20243350076_Preview.mp4?token=Bwl1l3rFMHAWUZgDBi1MFY8QhCk4fngkcd9WIpAIqBo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243350076/v-tvcg-20243350076_Preview.srt?token=3vy829ZDAcuAPjymyRN_V43xHj56G3yrezptt-DHLmU&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Flow, Topology, and Uncertainty","session_uid":"v-tvcg","session_youtube_ff_id":"IkbcvwKb3Ic","session_youtube_ff_link":"https://youtu.be/IkbcvwKb3Ic","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/j1W-7oN5gGk&t=0h49m11s","sessions":["Flow, Topology, and Uncertainty"],"time_stamp":"2024-10-18T13:18:00Z","title":"Inclusion Depth for Contour Ensembles","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1155","abstract":"Interactive visualizations are powerful tools for Exploratory Data Analysis (EDA), but how do they affect the observations analysts make about their data? We conducted a qualitative experiment with 13 professional data scientists analyzing two datasets with Jupyter notebooks, collecting a rich dataset of interaction traces and think-aloud utterances. By qualitatively coding participant utterances, we introduce a formalism that describes EDA as a sequence of analysis states, where each state is comprised of either a representation an analyst constructs (e.g., the output of a data frame, an interactive visualization, etc.) or an observation the analyst makes (e.g., about missing data, the relationship between variables, etc.). 
By applying our formalism to our dataset, we identify that interactive visualizations, on average, lead to earlier and more complex insights about relationships between dataset attributes compared to static visualizations. Moreover, by calculating metrics such as revisit count and representational diversity, we uncover that some representations serve more as \"planning aids\" during EDA rather than tools strictly for hypothesis-answering. We show how these measures help identify other patterns of analysis behavior, such as the \"80-20 rule\", where a small subset of representations drove the majority of observations. Based on these findings, we offer design guidelines for interactive exploratory analysis tooling and reflect on future directions for studying the role that visualizations play in EDA. ","accessible_pdf":true,"authors":[{"affiliations":["MIT, Cambridge, United States"],"email":"dwootton@mit.edu","is_corresponding":true,"name":"Dylan Wootton"},{"affiliations":["MIT, Cambridge, United States"],"email":"amyraefoxphd@gmail.com","is_corresponding":false,"name":"Amy Rae Fox"},{"affiliations":["University of Colorado Boulder, Boulder, United States"],"email":"evan.peck@colorado.edu","is_corresponding":false,"name":"Evan Peck"},{"affiliations":["MIT, Cambridge, United States"],"email":"arvindsatya@mit.edu","is_corresponding":false,"name":"Arvind Satyanarayan"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1155","image_caption":"A diagram illustrating a mixed-methods study of Exploratory Data Analysis (EDA) practices. The left section shows 13 data scientists conducting two EDAs, first with static charts, then with static and interactive charts. Think-aloud utterances and interaction traces are collected from these sessions. The middle section depicts how this data is processed: utterances are coded via content analysis to create observations, which are combined with interaction data to form a comprehensive dataset of EDA sessions. EDA metrics such as revisit rate and hover time are computed from this dataset. The right section demonstrates a formal description of EDA sessions, showing examples of how participants' actions and observations are encoded, including creating visualizations, commenting on distributions, and identifying relationships using various chart types. 
This systematic approach combines qualitative data collection with quantitative analysis to provide insights into EDA behaviors and strategies.","keywords":["Interaction Design, Methodologies, HumanQual, HumanQuant."],"open_access_supplemental_link":"https://osf.io/bu7je/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/HC69aABUJuc&t=0h0m30s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1155/v-full-1155_Preview.srt?token=BZU6Pn7B-4AbZfqQ-GNhRkm97YTfS1r2nPHEQrnbDhg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full30","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Scripts, Notebooks, and Provenance","session_uid":"v-full","session_youtube_ff_id":"CNQni-VZ4FI","session_youtube_ff_link":"https://youtu.be/CNQni-VZ4FI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HC69aABUJuc&t=0h0m30s","sessions":["Scripts, Notebooks, and Provenance"],"time_stamp":"2024-10-16T17:45:00Z","title":"Charting EDA: How Visualizations and Interactions Shape Analysis in Computational Notebooks.","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1204","abstract":"We present ProvenanceWidgets, a Javascript library of UI control elements such as radio buttons, checkboxes, and dropdowns to track and dynamically overlay a user's analytic provenance. These in situ overlays not only save screen space but also minimize the amount of time and effort needed to access the same information from elsewhere in the UI. In this paper, we discuss how we design modular UI control elements to track how often and how recently a user interacts with them and design visual overlays showing an aggregated summary as well as a detailed temporal history. We demonstrate the capability of ProvenanceWidgets by recreating three prior widget libraries: (1) Scented Widgets, (2) Phosphor objects, and (3) Dynamic Query Widgets. We also evaluated its expressiveness and conducted case studies with visualization developers to evaluate its effectiveness. We find that ProvenanceWidgets enables developers to implement custom provenance-tracking applications effectively. 
ProvenanceWidgets is available as open-source software at https://github.com/ProvenanceWidgets to help application developers build custom provenance-based systems.","accessible_pdf":true,"authors":[{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"arpitnarechania@gatech.edu","is_corresponding":true,"name":"Arpit Narechania"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"kaustubhodak1@gmail.com","is_corresponding":false,"name":"Kaustubh Odak"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"melassady@ai.ethz.ch","is_corresponding":false,"name":"Mennatallah El-Assady"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"endert@gatech.edu","is_corresponding":false,"name":"Alex Endert"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1204","image_caption":"ProvenanceWidgets is a new open-source JavaScript library of UI controls such as range sliders and dropdowns to track and dynamically overlay analytic provenance. Install it as \"npm install provenance-widgets\".","keywords":["Provenance, Analytic provenance, Visualization, UI controls, GUI elements, JavaScript library."],"open_access_supplemental_link":"https://github.com/ProvenanceWidgets/Supplemental-Material","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"http://arxiv.org/abs/2407.17431","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/HC69aABUJuc&t=0h58m10s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1204/v-full-1204_Preview.mp4?token=NcgZCl_jM23zSL86QflPmp1ABUfon9PW1Ai_9rwx-P0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1204/v-full-1204_Preview.srt?token=u4yM076oyfVu5TTGXjNWLCvxEI_pJKmW42rfn9048AU&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full30","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Scripts, Notebooks, and Provenance","session_uid":"v-full","session_youtube_ff_id":"Ed1cZDTTFd0","session_youtube_ff_link":"https://youtu.be/Ed1cZDTTFd0","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HC69aABUJuc&t=0h58m10s","sessions":["Scripts, Notebooks, and Provenance"],"time_stamp":"2024-10-16T18:45:00Z","title":"ProvenanceWidgets: A Library of UI Control Elements to Track and Dynamically Overlay Analytic Provenance","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1251","abstract":"Exploratory data science is an iterative process of obtaining, cleaning, profiling, analyzing, and interpreting data. This cyclical way of working creates challenges within the linear structure of computational notebooks, leading to issues with code quality, recall, and reproducibility. To remedy this, we present Loops, a set of visual support techniques for iterative and exploratory data analysis in computational notebooks. Loops leverages provenance information to visualize the impact of changes made within a notebook. In visualizations of the notebook provenance, we trace the evolution of the notebook over time and highlight differences between versions. 
Loops visualizes the provenance of code, markdown, tables, visualizations, and images and their respective differences. Analysts can explore these differences in detail in a separate view. Loops not only makes the analysis process transparent but also supports analysts in their data science work by showing the effects of changes and facilitating comparison of multiple versions. We demonstrate our approach's utility and potential impact in two use cases and feedback from notebook users from various backgrounds. This paper and all supplemental materials are available at https://osf.io/79eyn.","accessible_pdf":false,"authors":[{"affiliations":["Johannes Kepler University Linz, Linz, Austria"],"email":"klaus@eckelt.info","is_corresponding":true,"name":"Klaus Eckelt"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"kirangadhave2@gmail.com","is_corresponding":false,"name":"Kiran Gadhave"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"alex@sci.utah.edu","is_corresponding":false,"name":"Alexander Lex"},{"affiliations":["Johannes Kepler University Linz, Linz, Austria"],"email":"marc.streit@jku.at","is_corresponding":false,"name":"Marc Streit"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1251","image_caption":"Loops tracks and visualizes the provenance of computational notebooks. Compact and detailed visualizations of the notebook's history trace the evolution of the notebook over time and highlight differences between versions. Loops visualizes the provenance of code, markdown, tables, visualizations, and images and can explicitly encode their differences.","keywords":["Comparative visualization, computational notebooks, provenance, data science"],"open_access_supplemental_link":"https://osf.io/hxuak/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/preprints/osf/79eyn","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/HC69aABUJuc&t=0h24m28s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1251/v-full-1251_Preview.mp4?token=hfiHy_SBFkr3Cp8zrhAzomuDkTB9gV-gF67EgA19M08&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full30","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Scripts, Notebooks, and Provenance","session_uid":"v-full","session_youtube_ff_id":"2l7HgOd2NIY","session_youtube_ff_link":"https://youtu.be/2l7HgOd2NIY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HC69aABUJuc&t=0h24m28s","sessions":["Scripts, Notebooks, and Provenance"],"time_stamp":"2024-10-16T18:09:00Z","title":"Loops: Leveraging Provenance and Visualization to Support Exploratory Data Analysis in Notebooks","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1730","abstract":"Understanding the input and output of data wrangling scripts is crucial for various tasks like debugging code and onboarding new data. However, existing research on script understanding primarily focuses on revealing the process of data transformations, lacking the ability to analyze the potential scope, i.e., the space of script inputs and outputs. 
Meanwhile, constructing input/output space during script analysis is challenging, as the wrangling scripts could be semantically complex and diverse, and the association between different data objects is intricate. To facilitate data workers in understanding the input and output space of wrangling scripts, we summarize ten types of constraints to express table space and build a mapping between data transformations and these constraints to guide the construction of the input/output for individual transformations. Then, we propose a constraint generation model for integrating table constraints across multiple transformations. Based on the model, we develop Ferry, an interactive system that extracts and visualizes the data constraints describing the input and output space of data wrangling scripts, thereby enabling users to grasp the high-level semantics of complex scripts and locate the origins of faulty data transformations. Besides, Ferry provides example input and output data to assist users in interpreting the extracted constraints and checking and resolving the conflicts between these constraints and any uploaded dataset. Ferry\u2019s effectiveness and usability are evaluated through two usage scenarios and two case studies, including understanding, debugging, and checking both single and multiple scripts, with and without executable data. Furthermore, an illustrative application is presented to demonstrate Ferry\u2019s flexibility.","accessible_pdf":false,"authors":[{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"rickyluozs@gmail.com","is_corresponding":true,"name":"Zhongsu Luo"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"kaixiong@zju.edu.cn","is_corresponding":false,"name":"Kai Xiong"},{"affiliations":["Zhejiang University, Hangzhou,Zhejiang, China"],"email":"3220105578@zju.edu.cn","is_corresponding":false,"name":"Jiajun Zhu"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"chenran928@zju.edu.cn","is_corresponding":false,"name":"Ran Chen"},{"affiliations":["Newcastle University, Newcastle Upon Tyne, United Kingdom"],"email":"xinhuan.shu@gmail.com","is_corresponding":false,"name":"Xinhuan Shu"},{"affiliations":["Zhejiang University, Ningbo, China"],"email":"dweng@zju.edu.cn","is_corresponding":false,"name":"Di Weng"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"ycwu@zju.edu.cn","is_corresponding":false,"name":"Yingcai Wu"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1730","image_caption":"The user interface of Ferry. Ferry is an interactive system that uses a constraint-based approach to help data workers understand the input/output space of data wrangling scripts. It aids in comprehending this space through constraint icon and constraint tag, combined with sample data. 
Additionally, Ferry detects conflicts between requirements and scripts, facilitating efficient scripts reuse and debugging.","keywords":["Data wrangling, Visual analytics, Constraints, Program understanding"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/HC69aABUJuc&t=0h12m55s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1730/v-full-1730_Preview.mp4?token=Op3_qrWX97c6NMeKmmpmOyBBKivPzJrkuUPEMAy2z2M&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1730/v-full-1730_Preview.srt?token=WwUEr4W9Iji8zAoBGgwCuRJr23SJXZt2SGzLiIiVuPY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full30","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Scripts, Notebooks, and Provenance","session_uid":"v-full","session_youtube_ff_id":"C0yhkKGlj7k","session_youtube_ff_link":"https://youtu.be/C0yhkKGlj7k","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HC69aABUJuc&t=0h12m55s","sessions":["Scripts, Notebooks, and Provenance"],"time_stamp":"2024-10-16T17:57:00Z","title":"Ferry: Toward Better Understanding of Input/Output Space for Data Wrangling Scripts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1830","abstract":"Over the past decade, several urban visual analytics systems and tools have been proposed to tackle a host of challenges faced by cities, in areas as diverse as transportation, weather, and real estate. Many of these tools have been designed through collaborations with urban experts, aiming to distill intricate urban analysis workflows into interactive visualizations and interfaces. However, the design, implementation, and practical use of these tools still rely on siloed approaches, resulting in bespoke applications that are difficult to reproduce and extend. At the design level, these tools undervalue rich data workflows from urban experts, typically treating them only as data providers and evaluators. At the implementation level, they lack interoperability with other technical frameworks. At the practical use level, they tend to be narrowly focused on specific fields, inadvertently creating barriers to cross-domain collaboration. To address these gaps, we present Curio, a framework for collaborative urban visual analytics. Curio uses a dataflow model with multiple abstraction levels (code, grammar, GUI elements) to facilitate collaboration across the design and implementation of visual analytics components. The framework allows experts to intertwine data preprocessing, management, and visualization stages while tracking the provenance of code and visualizations. In collaboration with urban experts, we evaluate Curio through a diverse set of usage scenarios targeting urban accessibility, urban microclimate, and sunlight access. These scenarios use different types of data and domain methodologies to illustrate Curio's flexibility in tackling pressing societal challenges. 
Curio is available at https://urbantk.org/curio.","accessible_pdf":false,"authors":[{"affiliations":["University of Illinois at Chicago, Chicago, United States"],"email":"gmorei3@uic.edu","is_corresponding":true,"name":"Gustavo Moreira"},{"affiliations":["University of California, Berkeley, Berkeley, United States","Massachusetts Institute of Technology , Somerville, United States"],"email":"maryamh@mit.edu","is_corresponding":false,"name":"Maryam Hosseini"},{"affiliations":["University of Illinois Urbana-Champaign, Urbana-Champaign, United States"],"email":"carolvfs@illinois.edu","is_corresponding":false,"name":"Carolina Veiga"},{"affiliations":["Universidade Federal Fluminense, Niteroi, Brazil"],"email":"lucasalexandre.s.cc@gmail.com","is_corresponding":false,"name":"Lucas Alexandre"},{"affiliations":["Politecnico di Milano, Milano, Italy"],"email":"nicola.colaninno@polimi.it","is_corresponding":false,"name":"Nicola Colaninno"},{"affiliations":["Universidade Federal Fluminense, Niter\u00f3i, Brazil"],"email":"danielcmo@ic.uff.br","is_corresponding":false,"name":"Daniel de Oliveira"},{"affiliations":["Universidade Federal de Pernambuco, Recife, Brazil"],"email":"nivan@cin.ufpe.br","is_corresponding":false,"name":"Nivan Ferreira"},{"affiliations":["Universidade Federal Fluminense , Niteroi, Brazil"],"email":"mlage@ic.uff.br","is_corresponding":false,"name":"Marcos Lage"},{"affiliations":["University of Illinois Chicago, Chicago, United States"],"email":"fabiom@uic.edu","is_corresponding":false,"name":"Fabio Miranda"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1830","image_caption":"The rise of urban data rise has led experts to address societal challenges using data-driven methods. Yet, effective analysis requires diverse resources and complex workflows. Current tools like urban visual analytics applications and computational notebooks often fall short. To address these challenges, we propose Curio, a provenance-aware collaborative framework for urban visual analytics. Curio allows users to build and iterate on dataflows with reusable modules, supporting collaborative design and tracking of changes. 
We evaluated Curio with domain experts through a set of case studies focusing on urban accessibility, climate, and sunlight access.","keywords":["Urban analytics, urban data, spatial data, dataflow, provenance, visualization framework, visualization system"],"open_access_supplemental_link":"https://urbantk.org/curio","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.06139","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/HC69aABUJuc&t=0h47m45s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1830/v-full-1830_Preview.mp4?token=PMw01OvmxRM3Dmn59e5pWrPncdeYM4gUN3dS9qUjibM&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1830/v-full-1830_Preview.srt?token=xH83hAsr19T2OlPPJKG6USmJSlzqV9swdUgxjw-rkPY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full30","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Scripts, Notebooks, and Provenance","session_uid":"v-full","session_youtube_ff_id":"phFXjrH7_ns","session_youtube_ff_link":"https://youtu.be/phFXjrH7_ns","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HC69aABUJuc&t=0h47m45s","sessions":["Scripts, Notebooks, and Provenance"],"time_stamp":"2024-10-16T18:33:00Z","title":"Curio: A Dataflow-Based Framework for Collaborative Urban Visual Analytics","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243354561","abstract":"Interactive visualization can support fluid exploration but is often limited to predetermined tasks. Scripting can support a vast range of queries but may be more cumbersome for free-form exploration. Embedding interactive visualization in scripting environments, such as computational notebooks, provides an opportunity to leverage the strengths of both direct manipulation and scripting. We investigate interactive visualization design methodology, choices, and strategies under this paradigm through a design study of calling context trees used in performance analysis, a field which exemplifies typical exploratory data analysis workflows with big data and hard to define problems. We first produce a formal task analysis assigning tasks to graphical or scripting contexts based on their specificity, frequency, and suitability. We then design a notebook-embedded interactive visualization and validate it with intended users. In a follow-up study, we present participants with multiple graphical and scripting interaction modes to elicit feedback about notebook-embedded visualization design, finding consensus in support of the interaction model. 
We report and reflect on observations regarding the process and design implications for combining visualization and scripting in notebooks.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Connor Scully-Allison"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ian Lumsden"},{"affiliations":"","email":"","is_corresponding":false,"name":"Katy Williams"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jesse Bartels"},{"affiliations":"","email":"","is_corresponding":false,"name":"Michela Taufer"},{"affiliations":"","email":"","is_corresponding":false,"name":"Stephanie Brink"},{"affiliations":"","email":"","is_corresponding":false,"name":"Abhinav Bhatele"},{"affiliations":"","email":"","is_corresponding":false,"name":"Olga Pearce"},{"affiliations":"","email":"","is_corresponding":false,"name":"Katherine E. Isaacs"}],"award":"","doi":"10.1109/TVCG.2024.3354561","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243354561","image_caption":"Our model for assigning tasks to interactive visualization or scripting modalities when designing notebook embedded visualizations. Task frequency and specificity inform preferred modalities. Highly specific tasks, such as complex queries with precise numbers can be assigned to scripting as they offered expressivity and efficiency to scripting-familiar audience over complex visual interfaces. Less-specific, more frequent tasks like finding anomalies can be assigned to visualization as they supports multiple forms of recognition and browsing. We note many tasks can be supported by both, with a hand-off as the analysis grows from more exploratory to more concrete.","keywords":["Exploratory Data Analysis, Interactive Data Analysis, Computational Notebooks, Hybrid Visualization-Scripting, Visualization Design"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/HC69aABUJuc&t=0h37m20s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243354561/v-tvcg-20243354561_Preview.mp4?token=9Ez-32rWEPjm7Uv5glU87qDJ3Y9tQUzKfRHrq9Xynvc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243354561/v-tvcg-20243354561_Preview.srt?token=H5SXYSVTxWe2GR7CEQohT2N1uMy5Hcdk_oVGrP_u0Gg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full30","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Scripts, Notebooks, and Provenance","session_uid":"v-tvcg","session_youtube_ff_id":"67Um_JEdwEk","session_youtube_ff_link":"https://youtu.be/67Um_JEdwEk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/HC69aABUJuc&t=0h37m20s","sessions":["Scripts, Notebooks, and Provenance"],"time_stamp":"2024-10-16T18:21:00Z","title":"Design Concerns for Integrated Scripting and Interactive Visualization in Notebook Environments","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1218","abstract":"Placing text labels is a common way to explain key elements in a given scene. Given a graphic input and original label information, how to place labels to meet both geometric and aesthetic requirements is an open challenging problem. 
Geometry-wise, traditional rule-driven solutions struggle to capture the complex interactions between labels, let alone consider graphical/appearance content. In terms of aesthetics, training/evaluation data ideally require nontrivial effort and expertise in design, thus resulting in a lack of decent datasets for learning-based methods. To address the above challenges, we formulate the task with a graph representation, where nodes correspond to labels and edges to interactions between labels, and treat label placement as a node position prediction problem. With this novel representation, we design a Label Placement Graph Transformer (LPGT) to predict label positions. Specifically, edge-level attention, conditioned on node representations, is introduced to reveal potential relationships between labels. To integrate graphic/image information, we design a feature aligning strategy that extracts deep features for nodes and edges efficiently. Next, to address the dataset issue, we collect commercial illustrations with professionally designed label layouts from household appliance manuals, and annotate them with useful information to create a novel dataset named the Appliance Manual Illustration Labels (AMIL) dataset. In the thorough evaluation on AMIL, our LPGT solution achieves promising label placement performance compared with popular baselines. Our algorithm is available at https://github.com/JingweiQu/LPGT.","accessible_pdf":false,"authors":[{"affiliations":["Southwest University, Beibei, China"],"email":"qujingwei@swu.edu.cn","is_corresponding":true,"name":"Jingwei Qu"},{"affiliations":["Southwest University, Chongqing, China"],"email":"z2211973606@email.swu.edu.cn","is_corresponding":false,"name":"Pingshun Zhang"},{"affiliations":["Southwest University, Beibei, China"],"email":"enyuche@gmail.com","is_corresponding":false,"name":"Enyu Che"},{"affiliations":["COLLEGE OF COMPUTER AND INFORMATION SCIENCE, SOUTHWEST UNIVERSITY SCHOOL OF SOFTWAREC, Chongqin, China"],"email":"out1147205215@outlook.com","is_corresponding":false,"name":"Yinan Chen"},{"affiliations":["Stony Brook University, New York, United States"],"email":"hling@cs.stonybrook.edu","is_corresponding":false,"name":"Haibin Ling"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1218","image_caption":"GNN-driven label placement. For a set of labels to be placed in a graphic, Label Placement Graph Transformer (LPGT) predicts the label layout given the graphic and raw label information. First, a complete graph is constructed to capture the relationship between labels. Its node and edge features are generated from the label information and image features. Next, given the graph as input, LPGT iteratively learns the displacements of the nodes by a sequence of GNN modules. 
The graph is updated by each module and taken as input for the next module.","keywords":["Label placement, Graph neural network, Transformer"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/FLsXwoR_H8E&t=0h48m12s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1218/v-full-1218_Preview.mp4?token=V_BqaYvYHAVkbDe2N4Q-tw9rdfS_5RQGXLTQGgkspUk&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full31","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visual Design: Sketching and Labeling","session_uid":"v-full","session_youtube_ff_id":"CrX4jHVmDfU","session_youtube_ff_link":"https://youtu.be/CrX4jHVmDfU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/FLsXwoR_H8E&t=0h48m12s","sessions":["Visual Design: Sketching and Labeling"],"time_stamp":"2024-10-17T15:03:00Z","title":"Graph Transformer for Label Placement","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1394","abstract":"This paper presents discursive patinas, a technique to visualize discussions onto data visualizations, inspired by how people leave traces in the physical world. While data visualizations are widely discussed in online communities and social media, comments tend to be displayed separately from the visualization and we lack ways to relate these discussions back to the content of the visualization, e.g., to situate comments, explain visual patterns, or question assumptions. In our visualization annotation interface, users can designate areas within the visualization. Discursive patinas are made of overlaid visual marks (anchors), attached to textual comments with category labels, likes, and replies. By coloring and styling the anchors, a meta visualization emerges, showing what and where people comment and annotate the visualization. These patinas show regions of heavy discussions, recent commenting activity, and the distribution of questions, suggestions, or personal stories. We ran workshops with 90 students, domain experts, and visualization researchers to study how people use anchors to discuss visualizations and how patinas influence people's understanding of the discussion. Our results show that discursive patinas improve the ability to navigate discussions and guide people to comments that help understand, contextualize, or scrutinize the visualization. 
We discuss the potential of anchors and patinas to support discursive engagements, including critical readings of visualizations, design feedback, and feminist approaches to data visualization.","accessible_pdf":true,"authors":[{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom","Potsdam University of Applied Sciences, Potsdam, Germany"],"email":"tobias.kauer@fh-potsdam.de","is_corresponding":true,"name":"Tobias Kauer"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"derya.akbaba@liu.se","is_corresponding":false,"name":"Derya Akbaba"},{"affiliations":["University of Applied Sciences Potsdam, Potsdam, Germany"],"email":"doerk@fh-potsdam.de","is_corresponding":false,"name":"Marian D\u00f6rk"},{"affiliations":["Inria, Bordeaux, France","University of Edinburgh, Edinburgh, United Kingdom"],"email":"bbach@inf.ed.ac.uk","is_corresponding":false,"name":"Benjamin Bach"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1394","image_caption":"Discursive Patinas present a new technique that visualizes discussions about visualizations, inspired by traces left in the physical world","keywords":["Data Visualization, Discussion, Annotation"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.17994","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/FLsXwoR_H8E&t=0h0m48s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1394/v-full-1394_Preview.mp4?token=CBABLCrD8Q-YwP3M7gFn4tKU58vJoBc0VguQ7DYvcRM&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1394/v-full-1394_Preview.srt?token=Aie2P6umyj0_cHVPgHwnqva_Uf-UOJIhrGytP-lXn_o&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full31","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visual Design: Sketching and Labeling","session_uid":"v-full","session_youtube_ff_id":"zBwtliqYULc","session_youtube_ff_link":"https://youtu.be/zBwtliqYULc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/FLsXwoR_H8E&t=0h0m48s","sessions":["Visual Design: Sketching and Labeling"],"time_stamp":"2024-10-17T14:15:00Z","title":"Discursive Patinas: Anchoring Discussions in Data Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1502","abstract":"Sketching is a common practice among visualization designers, and an approachable entry to visualizations for individuals, but moving from a sketch to a full fledged data visualization often requires throwing away the original sketch recreating it from scratch. We aim to instead formalize thesesketches, enabling them to support iteration and systematic data mapping through a visual-first templating workflow. In this workflow, authors sketch a representative visualization and structure it into an expressive template for an envisioned or partial dataset, capturing implicit style as well as explicit data mappings. In order to demonstrate and evaluate our proposed workflow, we implement DataGarden, and evaluate it through a reproduction and a freeform study. 
We discuss how DataGarden supports personal expression, and delve into the variety of visualizations that authors can produce with it, identifying cases which demonstrate the limitations of our approach and discuss avenues for future work.","accessible_pdf":false,"authors":[{"affiliations":["Universit\u00e9 Paris-Saclay, Orsay, France"],"email":"anna.offenwanger@gmail.com","is_corresponding":true,"name":"Anna Offenwanger"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Inria, LISN, Orsay, France"],"email":"theophanis.tsandilas@inria.fr","is_corresponding":false,"name":"Theophanis Tsandilas"},{"affiliations":["University of Toronto, Toronto, Canada"],"email":"fanny@dgp.toronto.edu","is_corresponding":false,"name":"Fanny Chevalier"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1502","image_caption":"DataGarden supports sketching personal, expressive designs and formalizing these as structured visualization templates. To express (A) a visualization design idea, a user sketches a few representative glyphs in (B) the canvas, making their vision explicit. DataGarden provides the means to structure the freeform sketch into a visualization template by (C) capturing implicit style and explicit data mappings via user interaction and machine support.","keywords":["Personal Visualization, Visualization template, Sketch input, Sketch-based visualization, Visualization by-example"],"open_access_supplemental_link":"https://datagarden-git.github.io/datagarden/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://hal.science/hal-04664470/","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/FLsXwoR_H8E&t=1h0m54s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1502/v-full-1502_Preview.mp4?token=w4CD-laAbkqYIpzNABukxJTmJ15CNB136u4pl5pvCeg&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1502/v-full-1502_Preview.srt?token=JB23hpFMZEeWtz5LvaymlOgJV0mIBGDhUhcpOdvAw60&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full31","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visual Design: Sketching and Labeling","session_uid":"v-full","session_youtube_ff_id":"IFG97n_gi0g","session_youtube_ff_link":"https://youtu.be/IFG97n_gi0g","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/FLsXwoR_H8E&t=1h0m54s","sessions":["Visual Design: Sketching and Labeling"],"time_stamp":"2024-10-17T15:15:00Z","title":"DataGarden: Formalizing Personal Sketches into Structured Visualization Templates","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233345340","abstract":"Label quality issues, such as noisy labels and imbalanced class distributions, have negative effects on model performance. Automatic reweighting methods identify problematic samples with label quality issues by recognizing their negative effects on validation samples and assigning lower weights to them. However, these methods fail to achieve satisfactory performance when the validation samples are of low quality. To tackle this, we develop Reweighter, a visual analysis tool for sample reweighting. 
The reweighting relationships between validation samples and training samples are modeled as a bipartite graph. Based on this graph, a validation sample improvement method is developed to improve the quality of validation samples. Since the automatic improvement may not always be perfect, a co-cluster-based bipartite graph visualization is developed to illustrate the reweighting relationships and support the interactive adjustments to validation samples and reweighting results. The adjustments are converted into the constraints of the validation sample improvement method to further improve validation samples. We demonstrate the effectiveness of Reweighter in improving reweighting results through quantitative evaluation and two case studies.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Weikai Yang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yukai Guo"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jing Wu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Zheng Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Lan-Zhe Guo"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yu-Feng Li"},{"affiliations":"","email":"","is_corresponding":false,"name":"Shixia Liu"}],"award":"","doi":"10.1109/TVCG.2023.3345340","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233345340","image_caption":"Reweightor: (a) The reweighting relationships between 3 (out of 14) validation sample clusters and 6 (out of 35) training sample clusters. V1 and V2 contain low-quality validation samples, resulting in many inconsistent training samples in S1 and S2. 
(b) After correcting the noisy labels of low-quality validation samples, increasing the weights of high-quality validation samples, and verifying inconsistent training samples, the reweighting results are improved (S''1 and S'2).","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2312.05067","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/FLsXwoR_H8E&t=0h36m58s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233345340/v-tvcg-20233345340_Preview.mp4?token=VcRo6hbin8uXz7KmnTsmVvNa8mU_S8VAGYC-BJIeQhc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233345340/v-tvcg-20233345340_Preview.srt?token=xjZQ2cID1hWuPw7zn9Kzqgwk49mOw6WoCKYnM3tYuPY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full31","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visual Design: Sketching and Labeling","session_uid":"v-tvcg","session_youtube_ff_id":"bW_5eDLbNng","session_youtube_ff_link":"https://youtu.be/bW_5eDLbNng","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/FLsXwoR_H8E&t=0h36m58s","sessions":["Visual Design: Sketching and Labeling"],"time_stamp":"2024-10-17T14:51:00Z","title":"Interactive Reweighting for Mitigating Label Quality Issues","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243392476","abstract":"Areas of interest (AOIs) are well-established means of providing semantic information for visualizing, analyzing, and classifying gaze data. However, the usual manual annotation of AOIs is time consuming and further impaired by ambiguities in label assignments. To address these issues, we present an interactive labeling approach that combines visualization, machine learning, and user-centered explainable annotation. Our system provides uncertainty-aware visualization to build trust in classification with an increasing number of annotated examples. It combines specifically designed EyeFlower glyphs, dimensionality reduction, and selection and exploration techniques in an integrated workflow. The approach is versatile and hardware-agnostic, supporting video stimuli from stationary and unconstrained mobile eye trackin alike. 
We conducted an expert review to assess labeling strategies and trust building.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Maurice Koch"},{"affiliations":"","email":"","is_corresponding":false,"name":"Nan Cao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Daniel Weiskopf"},{"affiliations":"","email":"","is_corresponding":false,"name":"Kuno Kurzhals"}],"award":"","doi":"10.1109/TVCG.2024.3392476","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243392476","image_caption":"Uncertainty-aware visualization approach for interactive labeling of eye-tracking videos that combines specifically designed glyphs, dimensionality reduction, and exploration techniques in an integrated workflow.","keywords":["Visual analytics, eye tracking, uncertainty, active learning, trust building"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/FLsXwoR_H8E&t=0h12m48s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243392476/v-tvcg-20243392476_Preview.mp4?token=gnYzxQO__CeGs4Cun8TMWoG6ni5U1YzoBSZnt1Eia3I&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243392476/v-tvcg-20243392476_Preview.srt?token=nFyCu1x6MiFVIVsdLvxqWN9xHWDFQU7yBBPcpxucbos&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full31","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visual Design: Sketching and Labeling","session_uid":"v-tvcg","session_youtube_ff_id":"QQcYetRH7uw","session_youtube_ff_link":"https://youtu.be/QQcYetRH7uw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/FLsXwoR_H8E&t=0h12m48s","sessions":["Visual Design: Sketching and Labeling"],"time_stamp":"2024-10-17T14:27:00Z","title":"Active Gaze Labeling: Visualization for Trust Building","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243402610","abstract":"Point clouds are widely used as a versatile representation of 3D entities and scenes for all scale domains and in a variety of application areas, serving as a fundamental data category to directly convey spatial features. However, due to point sparsity, lack of structure, irregular distribution, and acquisition-related inaccuracies, results of point cloudvisualization are often subject to visual complexity and ambiguity. In this regard, non-photorealistic rendering can improve visual communication by reducing the cognitive effort required to understand an image or scene and by directing attention to important features. In the last 20 years, this has been demonstrated by various non-photorealistic rrendering approaches that were proposed to target point clouds specifically. However, they do not use a common language or structure for assessment which complicates comparison and selection. Further, recent developments regarding point cloud characteristics and processing, such as massive data size or web-based rendering are rarelyconsidered. To address these issues, we present a survey on non-photorealistic rendering approaches for point cloud visualization, providing an overview of the current state of research. 
We derive a structure for the assessment of approaches, proposing seven primary dimensions for the categorization regarding intended goals, data requirements, used techniques, and mode of operation. We then systematically assess corresponding approaches and utilize this classification to identify trends and research gaps, motivating future research in the development of effective non-photorealistic point cloud rendering methods.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Ole Wegen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Willy Scheibel"},{"affiliations":"","email":"","is_corresponding":false,"name":"Matthias Trapp"},{"affiliations":"","email":"","is_corresponding":false,"name":"Rico Richter"},{"affiliations":"","email":"","is_corresponding":false,"name":"J\u00fcrgen D\u00f6llner"}],"award":"","doi":"10.1109/TVCG.2024.3402610","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243402610","image_caption":"Non-photorealistic rendering (NPR) can improve visual communication by reducing the cognitive effort required to understand an image and by directing attention to important features. Over the past two decades, several NPR approaches have been developed, specifically targeting point clouds (1). To evaluate these methods, we use seven dimensions derived from the design process for point cloud NPR approaches (2). The systematic assessment of the corresponding approaches (3) allows us to identify trends and research gaps.","keywords":["Point clouds, survey, non-photorealistic rendering"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/FLsXwoR_H8E&t=0h22m40s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243402610/v-tvcg-20243402610_Preview.mp4?token=tXNQN9yjdAbyE28srWhHmmcERkq0tv8cnRvStp7S6fo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243402610/v-tvcg-20243402610_Preview.srt?token=H6gSriwCgV9HBrta1RH3a67XaSIrXcm7yFaM3WxEoaM&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full31","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"Visual Design: Sketching and Labeling","session_uid":"v-tvcg","session_youtube_ff_id":"H6-xLO6_IzM","session_youtube_ff_link":"https://youtu.be/H6-xLO6_IzM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/FLsXwoR_H8E&t=0h22m40s","sessions":["Visual Design: Sketching and Labeling"],"time_stamp":"2024-10-17T14:39:00Z","title":"A Survey on Non-photorealistic Rendering Approaches for Point Cloud Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1833","abstract":"The concept of an intelligent augmented reality (AR) assistant has significant, wide-ranging applications, with potential uses in medicine, military, and mechanics domains. Such an assistant must be able to perceive the environment and actions, reason about the environment state in relation to a given task, and seamlessly interact with the task performer. These interactions typically involve an AR headset equipped with sensors which capture video, audio, and haptic feedback. 
Previous works have sought to facilitate the development of intelligent AR assistants by visualizing these sensor data streams in conjunction with the assistant's perception and reasoning model outputs. However, existing visual analytics systems do not focus on user modeling or include biometric data, and are only capable of visualizing a single task session for a single performer at a time. Moreover, they typically assume a task involves linear progression from one step to the next. We propose a visual analytics system that allows users to compare performance during multiple task sessions, focusing on non-linear tasks where different step sequences can lead to success. In particular, we design visualizations for understanding user behavior through functional near-infrared spectroscopy (fNIRS) data as a proxy for perception, attention, and memory as well as corresponding motion data (acceleration, angular velocity, and gaze). We distill these insights into embedding representations that allow users to easily select groups of sessions with similar behaviors. We provide two case studies that demonstrate how to use these visualizations to gain insights abouttask performance using data collected during helicopter copilot training tasks. Finally, we evaluate our approach by conducting an in-depth examination of a think-aloud experiment with five domain experts.","accessible_pdf":false,"authors":[{"affiliations":["New York University, New York, United States"],"email":"s.castelo@nyu.edu","is_corresponding":true,"name":"Sonia Castelo Quispe"},{"affiliations":["New York University, New York, United States"],"email":"jlrulff@gmail.com","is_corresponding":false,"name":"Jo\u00e3o Rulff"},{"affiliations":["New York University, Brooklyn, United States"],"email":"pss442@nyu.edu","is_corresponding":false,"name":"Parikshit Solunke"},{"affiliations":["New York University, New York, United States"],"email":"erin.mcgowan@nyu.edu","is_corresponding":false,"name":"Erin McGowan"},{"affiliations":["New York University, New York CIty, United States"],"email":"guandewu@nyu.edu","is_corresponding":false,"name":"Guande Wu"},{"affiliations":["New York University, Brooklyn, United States"],"email":"iran@ccrma.stanford.edu","is_corresponding":false,"name":"Iran Roman"},{"affiliations":["New York University, New York, United States"],"email":"rlopez@nyu.edu","is_corresponding":false,"name":"Roque Lopez"},{"affiliations":["New York University, Brooklyn, United States"],"email":"bs3639@nyu.edu","is_corresponding":false,"name":"Bea Steers"},{"affiliations":["New York University, New York, United States"],"email":"qisun@nyu.edu","is_corresponding":false,"name":"Qi Sun"},{"affiliations":["New York University, New York, United States"],"email":"jpbello@nyu.edu","is_corresponding":false,"name":"Juan Pablo Bello"},{"affiliations":["Northrop Grumman Mission Systems, Redondo Beach, United States"],"email":"bradley.feest@ngc.com","is_corresponding":false,"name":"Bradley S Feest"},{"affiliations":["Northrop Grumman, Aurora, United States"],"email":"michael.middleton@ngc.com","is_corresponding":false,"name":"Michael Middleton"},{"affiliations":["Northrop Grumman, Falls Church, United States"],"email":"ryan.mckendrick@ngc.com","is_corresponding":false,"name":"Ryan McKendrick"},{"affiliations":["New York University, New York City, United States"],"email":"csilva@nyu.edu","is_corresponding":false,"name":"Claudio Silva"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full 
Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1833","image_caption":"HuBar is a visual analytics system designed to analyze performer behavior in AR-assisted tasks, enabling multi-perspective analysis of multimodal time-series data. It provides a hierarchical set of visualizations: the Scatter Plot View (A) identifies clusters and patterns, the Workload Aggregation View (B) summarizes cognitive workloads and errors, the Event Timeline View (C) aligns time series collected during sessions, enabling comparison across sessions and exploration to update linked views, the Summary Matrix View (D) analyzes procedure frequency and errors, and the Detail View (E) enables in-depth session exploration with synchronized video and time series visualizations.","keywords":["Perception & Cognition, Application Motivated Visualization, Temporal Data, Image and Video Data, Mobile, AR/VR/Immersive, Specialized Input/Display Hardware."],"open_access_supplemental_link":"https://github.com/VIDA-NYU/HuBar","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/pdf/2407.12260v1","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/QoKwx8sUMyg&t=0h23m16s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1833/v-full-1833_Preview.mp4?token=kXpVEp2G8JSoWr3JmAkTz5HHLuEVtLEGtn6CkLaAN8k&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1833/v-full-1833_Preview.srt?token=TJChJ8-kK0-qBRk7QY9CzGYXXCNAFxVIEHdA94znYnw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full4","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"The Toolboxes of Visualization","session_uid":"v-full","session_youtube_ff_id":"AaX3LMAAkL4","session_youtube_ff_link":"https://youtu.be/AaX3LMAAkL4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/QoKwx8sUMyg&t=0h23m16s","sessions":["The Toolboxes of Visualization"],"time_stamp":"2024-10-17T16:24:00Z","title":"HuBar: A Visual Analytics Tool to Explore Human Behaviour based on fNIRS in AR guidance systems","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1189","abstract":"The information visualization research community commonly produces supporting software to demonstrate technical contributions to the field. However, developing this software tends to be an overwhelming task. The final product tends to be a research prototype without much thought for modularization and re-usability, which makes it harder to replicate and adopt. This paper presents a design pattern for facilitating the creation, dissemination, and re-utilization of visualization techniques using reactive widgets. The design pattern features basic concepts that leverage modern front-end development best practices and standards, which facilitate development and replication. 
The paper presents several usage examples of the pattern, templates for implementation, and even a wrapper for facilitating the conversion of any Vega [27,28] specification into a reactive widget.","accessible_pdf":true,"authors":[{"affiliations":["Northeastern University, San Francisco, United States"],"email":"jguerra@northeastern.edu","is_corresponding":true,"name":"John Alexis Guerra-Gomez"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1189","image_caption":"IEEE VIS 2024 Paper embedding explorer main interface, showing a main scatterplot of each one of the papers of the conference distributed using UMAP dimensionality reduction. The scatterplot has been brushed for selecting papers that are highlighted on the bottom of the page showing the thumbnail image, title and abstract. On the top some controls allow for the selection of the dimensionality reduction method and some hyperparameters","keywords":["Information Visualization, Software Components, Reactive Components, Notebook Programming, Direct Manipulation, Brush and Linking"],"open_access_supplemental_link":"https://observablehq.com/@john-guerra/reactive-widgets","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/QoKwx8sUMyg&t=1h0m6s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1189/v-short-1189_Preview.mp4?token=rPe1SPGjCEZc8bReZDXHzfLMpdURwWQ8JW0xz4oXo94&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1189/v-short-1189_Preview.srt?token=37NNBJWgqLS1dSoUD3CeZ8ifLVuy098GbLy_KBSmUFY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full4","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"The Toolboxes of Visualization","session_uid":"v-short","session_youtube_ff_id":"gPxR7ibKKvY","session_youtube_ff_link":"https://youtu.be/gPxR7ibKKvY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/QoKwx8sUMyg&t=1h0m6s","sessions":["The Toolboxes of Visualization"],"time_stamp":"2024-10-17T17:00:00Z","title":"Towards Reusable and Reactive Widgets for Information Visualization Research and Dissemination","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233261320","abstract":"In recent years, narrative visualization has gained much attention. Researchers have proposed different design spaces for various narrative visualization genres and scenarios to facilitate the creation process. As users' needs grow and automation technologies advance, increasingly more tools have been designed and developed. In this study, we summarized six genres of narrative visualization (annotated charts, infographics, timelines & storylines, data comics, scrollytelling & slideshow, and data videos) based on previous research and four types of tools (design spaces, authoring tools, ML/AI-supported tools and ML/AI-generator tools) based on the intelligence and automation level of the tools. We surveyed 105 papers and tools to study how automation can progressively engage in visualization design and narrative processes to help users easily create narrative visualizations. 
This research aims to provide an overview of current research and development in the automation involvement of narrative visualization tools. We discuss key research problems in each category and suggest new opportunities to encourage further research in the related domain.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Qing Chen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Shixiong Cao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jiazhe Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Nan Cao"}],"award":"","doi":"10.1109/TVCG.2023.3261320","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233261320","image_caption":"Number of relevant research publications or tools in different genres for narrative visualization in chronological order. This matrix visualizes the distribution of research publications and tools across six narrative visualization genres: Annotated Chart, Infographic, Timeline & Storyline, Data Comics, Scrollytelling & Slideshow, and Data Video, from before 2010 through 2022. Each colored circle represents a type of tool: Design Space (red), Authoring Tool (orange), ML/AI-supported Tool (green), or ML/AI-generator Tool (purple). The numbers represent the total count of publications or tools per genre per year, providing insights into the evolution and focus of research in narrative visualization over time.","keywords":["Data Visualization, Automatic Visualization, Narrative Visualization, Design Space, Authoring Tools, Survey"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2206.12118","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/QoKwx8sUMyg&t=0h35m40s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233261320/v-tvcg-20233261320_Preview.mp4?token=tPrsCIJecGNwXtkTPTVmcb1BRVVVRCOx89fOlRxbpgw&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233261320/v-tvcg-20233261320_Preview.srt?token=h7SGnaiJL8ly5r7DJuX2R33E8PRYUlNmo_OZAJ9wHd4&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full4","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"The Toolboxes of Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"xPShwRDa_9U","session_youtube_ff_link":"https://youtu.be/xPShwRDa_9U","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/QoKwx8sUMyg&t=0h35m40s","sessions":["The Toolboxes of Visualization"],"time_stamp":"2024-10-17T16:36:00Z","title":"How Does Automation Shape the Process of Narrative Visualization: A Survey of Tools","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233346641","abstract":"Currently, growing data sources and long-running algorithms impede user attention and interaction with visual analytics applications. Progressive visualization (PV) and visual analytics (PVA) alleviate this problem by allowing immediate feedback and interaction with large datasets and complex computations, avoiding waiting for complete results by using partial results improving with time. 
Yet, creating a progressive visualization requires more effort than a regular visualization but also opens up new possibilities, such as steering the computations towards more relevant parts of the data, thus saving computational resources. However, there is currently no comprehensive overview of the design space for progressive visualization systems. We surveyed the related work of PV and derived a new taxonomy for progressive visualizations by systematically categorizing all PV publications that included visualizations with progressive features. Progressive visualizations can be categorized by well-known visualization taxonomies, but we also found that progressive visualizations can be distinguished by the way they manage their data processing, data domain, and visual update. Furthermore, we identified key properties such as uncertainty, steering, visual stability, and real-time processing that are significantly different with progressive applications. We also collected evaluation methodologies reported by the publications and conclude with statistical findings, research gaps, and open challenges. A continuously updated visual browser of the survey data is available at visualsurvey.net/pva.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Alex Ulmer"},{"affiliations":"","email":"","is_corresponding":false,"name":"Marco Angelini"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jean-Daniel Fekete"},{"affiliations":"","email":"","is_corresponding":false,"name":"J\u00f6rn Kohlhammerm"},{"affiliations":"","email":"","is_corresponding":false,"name":"Thorsten May"}],"award":"","doi":"10.1109/TVCG.2023.3346641","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233346641","image_caption":"Our new taxonomy for progressive visualisations. The categories of visualisation are based on previous taxonomies proposed by Shneiderman, Keim and Munzner. The categories of progressive processing represent an extension of the characterisation proposed by Angelini et al., with the addition of a new variant, termed 'custom chunking'. The categories of data domain address the implications of differing visualisation designs in the context of known and unknown data or process endpoints. 
The fourth category is visual update pattern, which indicates the manner in which visualisations are updated in response to the generation of new partial results.","keywords":["Data visualization, Convergence, Visual analytics, Taxonomy Surveys, Rendering (computer graphics), Task analysis, Progressive Visual Analytics, Progressive Visualization, Taxonomy, State-of-the-Art Report, Survey"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/QoKwx8sUMyg&t=0h48m8s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233346641/v-tvcg-20233346641_Preview.mp4?token=-2u0ZjP8hKuE44Jh4xYL2aNynKfTkVJhCVBS1JjCWng&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233346641/v-tvcg-20233346641_Preview.srt?token=jl-uuHvQ3_CFNepSIiaxzD7GooOQhHVcC-8A7FXfBRc&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full4","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"The Toolboxes of Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"XXxozI-bcog","session_youtube_ff_link":"https://youtu.be/XXxozI-bcog","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/QoKwx8sUMyg&t=0h48m8s","sessions":["The Toolboxes of Visualization"],"time_stamp":"2024-10-17T16:48:00Z","title":"A Survey on Progressive Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243390219","abstract":"This system paper documents the technical foundations for the extension of the Topology ToolKit (TTK) to distributed-memory parallelism with the Message Passing Interface (MPI). While several recent papers introduced topology-based approaches for distributed-memory environments, these were reporting experiments obtained with tailored, mono-algorithm implementations. In contrast, we describe in this paper a versatile approach (supporting both triangulated domains and regular grids) for the support of topological analysis pipelines, i.e. a sequence of topological algorithms interacting together, possibly on distinct numbers of processes. While developing this extension, we faced several algorithmic and software engineering challenges, which we document in this paper. We describe an MPI extension of TTK\u2019s data structure for triangulation representation and traversal, a central component to the global performance and generality of TTK\u2019s topological implementations. We also introduce an intermediate interface between TTK and MPI, both at the global pipeline level, and at the fine-grain algorithmic level. We provide a taxonomy for the distributed-memory topological algorithms supported by TTK, depending on their communication needs and provide examples of hybrid MPI+thread parallelizations. Detailed performance analyses show that parallel efficiencies range from 20% to 80% (depending on the algorithms), and that the MPI-specific preconditioning introduced by our framework induces a negligible computation time overhead. We illustrate the new distributed-memory capabilities of TTK with an example of advanced analysis pipeline, combining multiple algorithms, run on the largest publicly available dataset we have found (120 billion vertices) on a standard cluster with 64 nodes (for a total of 1536 cores). 
Finally, we provide a roadmap for the completion of TTK\u2019s MPI extension, along with generic recommendations for each algorithm communication category.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"E. Le Guillou"},{"affiliations":"","email":"","is_corresponding":false,"name":"M. Will"},{"affiliations":"","email":"","is_corresponding":false,"name":"P. Guillou"},{"affiliations":"","email":"","is_corresponding":false,"name":"J. Lukasczyk"},{"affiliations":"","email":"","is_corresponding":false,"name":"P. Fortin"},{"affiliations":"","email":"","is_corresponding":false,"name":"C. Garth"},{"affiliations":"","email":"","is_corresponding":false,"name":"J. Tierny"}],"award":"","doi":"10.1109/TVCG.2024.3390219","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243390219","image_caption":"Output of an integrated pipeline that produces a real-life use case combining all of the algorithms parallelized in our paper. The pipeline is executed on the Turbulent Channel Flow dataset (120 billion vertices), a three-dimensional regular grid with two scalar fields, the pressure of the fluid and its gradient magnitude. The spheres correspond to the pressure critical points and the tubes are the integral lines starting at saddle points. Figure (a) shows all of the produced geometry, while (b) and (c) show parts of the output zoomed in. ","keywords":["Topological data analysis, high-performance computing, distributed-memory algorithms."],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/pdf/2310.08339","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/QoKwx8sUMyg&t=0h12m56s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243390219/v-tvcg-20243390219_Preview.mp4?token=JZVVxFRRTH9J64rV7USkfm_tjj3QFugQtpJl9kFyeOU&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full4","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"The Toolboxes of Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"CZWLfhlBYiQ","session_youtube_ff_link":"https://youtu.be/CZWLfhlBYiQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/QoKwx8sUMyg&t=0h12m56s","sessions":["The Toolboxes of Visualization"],"time_stamp":"2024-10-17T16:12:00Z","title":"TTK is Getting MPI-Ready","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243406387","abstract":"The process of labeling medical text plays a crucial role in medical research. Nonetheless, creating accurately labeled medical texts of high quality is often a time-consuming task that requires specialized domain knowledge. Traditional methods for generating labeled data typically rely on rigid rule-based approaches, which may not adapt well to new tasks. While recent machine learning (ML) methodologies have mitigated the manual labeling efforts, configuring models to align with specific research requirements can be challenging for labelers without technical expertise. 
Moreover, automated labeling techniques, such as transfer learning, face difficulties in in directly incorporating expert input, whereas semi-automated methods, like data programming, allow knowledge integration through rules or knowledge bases but may lack continuous result refinement throughout the entire labeling process. In this study, we present a collaborative human-ML teaming workflow that seamlessly integrates visual cluster analysis and active learning to assist domain experts in labeling medical text with high efficiency. Additionally, we introduce an innovative neural network model called the embedding network, which incorporates expert insights to generate task-specific embeddings for medical texts. We integrate the workflow and embedding network into a visual analytics tool named KMTLabeler, equipped with coordinated multi-level views and interactions. Two illustrative case studies, along with a controlled user study, provide substantial evidence of the effectiveness of KMTLabeler in creating an efficient labeling environment for medical text classification.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"He Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yang Ouyang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yuchen Wu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Chang Jiang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Lixia Jin"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yuanwu Cao"},{"affiliations":"","email":"","is_corresponding":true,"name":"Quan Li"}],"award":"","doi":"10.1109/TVCG.2024.3406387","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243406387","image_caption":" The KMTLabeler interface: The (A) Control Panel provides an overview of the dataset and enables filtering for labeling. The (B) Embedding Projection View allows users to compare and adjust projection structures for pattern exploration, while the (C) Weight Modification Panel and the (D) Rule Formulation Panel enable knowledge-based tuning of projection structures to align them with specific tasks. The (E) Cluster Comparison View facilitates detailed comparison of clusters for label creation, and the (F) Label Evaluation View evaluates clustering groups according to various metrics. 
The (G) Action Record View tracks actions during labeling, and (H) Active Learning Panel supports \"one-by-one\" labeling of suggested instances.","keywords":["Medical Text Labeling, Expert Knowledge, Embedding Network, Visual Cluster Analysis, Active Learning"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/QoKwx8sUMyg&t=0h0m39s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243406387/v-tvcg-20243406387_Preview.mp4?token=VnPRkdepeRVTnH4pmVCk7zBKKkpq42d2mzlaLMT86as&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243406387/v-tvcg-20243406387_Preview.srt?token=MPFzXxvpYtQoeH-IVkROFZyv0TberWC63x-c_lzzevI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full4","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"The Toolboxes of Visualization","session_uid":"v-tvcg","session_youtube_ff_id":"s2lF1u4g7c4","session_youtube_ff_link":"https://youtu.be/s2lF1u4g7c4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/QoKwx8sUMyg&t=0h0m39s","sessions":["The Toolboxes of Visualization"],"time_stamp":"2024-10-17T16:00:00Z","title":"KMTLabeler: An Interactive Knowledge-Assisted Labeling Tool for Medical Text Classification","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1705","abstract":"Contour trees describe the topology of level sets in scalar fields and are widely used in topological data analysis and visualization. A main challenge of utilizing contour trees for large-scale scientific data is their computation at scale using high-performance computing. To address this challenge, recent work has introduced distributed hierarchical contour trees for distributed computation and storage of contour trees. However, effective use of these distributed structures in analysis and visualization requires subsequent computation of geometric properties and branch decomposition to support contour extraction and exploration. In this work, we introduce distributed algorithms for augmentation, hypersweeps, and branch decomposition that enable parallel computation of geometric properties, and support the use of distributed contour trees as query structures for scientific exploration. 
We evaluate the parallel performance of these algorithms and apply them to identify and extract important contours for scientific visualization.","accessible_pdf":false,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"mingzhefluorite@gmail.com","is_corresponding":true,"name":"Mingzhe Li"},{"affiliations":["University of Leeds, Leeds, United Kingdom"],"email":"h.carr@leeds.ac.uk","is_corresponding":false,"name":"Hamish Carr"},{"affiliations":["Lawrence Berkeley National Laboratory, Berkeley, United States"],"email":"oruebel@lbl.gov","is_corresponding":false,"name":"Oliver R\u00fcbel"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"wang.bei@gmail.com","is_corresponding":false,"name":"Bei Wang"},{"affiliations":["Lawrence Berkeley National Laboratory, Berkeley, United States"],"email":"ghweber@lbl.gov","is_corresponding":false,"name":"Gunther H Weber"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1705","image_caption":"Our method applied to a 3D WarpX laser-driven, plasma-based particle accelerator simulation dataset with a resolution of 6791x371x371. We use the x-component of the electric field. Left: three 2D slices of the volume along different axes with the extracted contours on the slice. Right: Using distributed topological data analysis to extract and visualize 3D isosurfaces corresponding to the top-11 branches of the contour tree.","keywords":["Contour trees, branch decomposition, parallel algorithms, computational topology, topological data analysis"],"open_access_supplemental_link":"https://gitlab.kitware.com/vtk/vtk-m","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Pd3W5-EJRVg&t=0h27m49s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1705/v-full-1705_Preview.mp4?token=KRb7CIhbrASvEyQDGsuCalh0XjYtWrTtPA72tjD062g&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Topological Data Analysis","session_uid":"v-full","session_youtube_ff_id":"_RvXzzJfjFA","session_youtube_ff_link":"https://youtu.be/_RvXzzJfjFA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Pd3W5-EJRVg&t=0h27m49s","sessions":["Topological Data Analysis"],"time_stamp":"2024-10-17T14:39:00Z","title":"Distributed Augmentation, Hypersweeps, and Branch Decomposition of Contour Trees for Scientific Exploration","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1793","abstract":"This research explores a novel paradigm for preserving topological segmentations in existing error-bounded lossy compressors. Today's lossy compressors rarely consider preserving topologies such as Morse-Smale complexes, and the discrepancies in topology between original and decompressed datasets could potentially result in erroneous interpretations or even incorrect scientific conclusions. In this paper, we focus on preserving Morse-Smale segmentations in 2D/3D piecewise linear scalar fields, targeting the precise reconstruction of minimum/maximum labels induced by the integral line of each vertex. 
The key is to derive a series of edits during compression time. These edits are applied to the decompressed data, leading to an accurate reconstruction of segmentations while keeping the error within the prescribed error bound. To this end, we develop a workflow to fix extrema and integral lines alternatively until convergence within finite iterations. We accelerate each workflow component with shared-memory/GPU parallelism to make the performance practical for coupling with compressors. We demonstrate use cases with fluid dynamics, ocean, and cosmology application datasets with a significant acceleration with an NVIDIA A100 GPU.","accessible_pdf":true,"authors":[{"affiliations":["The Ohio State University, Columbus, United States"],"email":"li.14025@osu.edu","is_corresponding":true,"name":"Yuxiao Li"},{"affiliations":["University of California, Riverside, Riverside, United States"],"email":"xlian007@ucr.edu","is_corresponding":false,"name":"Xin Liang"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"wang.bei@gmail.com","is_corresponding":false,"name":"Bei Wang"},{"affiliations":["The Ohio State University, Columbus, United States"],"email":"qiu.722@osu.edu","is_corresponding":false,"name":"Yongfeng Qiu"},{"affiliations":["Argonne National Laboratory, Lemont, United States"],"email":"lyan@anl.gov","is_corresponding":false,"name":"Lin Yan"},{"affiliations":["The Ohio State University, Columbus, United States"],"email":"guo.2154@osu.edu","is_corresponding":false,"name":"Hanqi Guo"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1793","image_caption":"This figure compares SZ3 and ours (SZ3) in terms of feature preservation capability for MSS in combustion data. False cases are highlighted with boxes.","keywords":["Lossy compression, feature-preserving compression, Morse-Smale segmentations, shared-memory parallelism."],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2406.09423","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Pd3W5-EJRVg&t=0h0m44s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1793/v-full-1793_Preview.mp4?token=mq_GwqVoaX8VM0EBDuKWSvU8zRZEYMCZpPRYlndDs5E&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1793/v-full-1793_Preview.srt?token=qZnV8L0OVhtE5M8C6zYi7I6Me-QIz-UonlBkWLAHe44&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Topological Data Analysis","session_uid":"v-full","session_youtube_ff_id":"TRMO8YUuSSs","session_youtube_ff_link":"https://youtu.be/TRMO8YUuSSs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Pd3W5-EJRVg&t=0h0m44s","sessions":["Topological Data Analysis"],"time_stamp":"2024-10-17T14:15:00Z","title":"MSz: An Efficient Parallel Algorithm for Correcting Morse-Smale Segmentations in Error-Bounded Lossy Compressors","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1803","abstract":"Scalar field comparison is a fundamental task in scientific visualization. 
In topological data analysis, we compare topological descriptors of scalar fields---such as persistence diagrams and merge trees---because they provide succinct and robust abstract representations. Several similarity measures for topological descriptors seem to be both asymptotically and practically efficient with polynomial time algorithms, but they do not scale well when handling large-scale, time-varying scientific data and ensembles. In this paper, we propose a new framework to facilitate the comparative analysis of merge trees, inspired by tools from locality sensitive hashing (LSH). LSH hashes similar objects into the same hash buckets with high probability. We propose two new similarity measures for merge trees that can be computed via LSH, using new extensions to Recursive MinHash and subpath signature, respectively. Our similarity measures are extremely efficient to compute and closely resemble the results of existing measures such as merge tree edit distance or geometric interleaving distance. Our experiments demonstrate the utility of our LSH framework in applications such as shape matching, clustering, key event detection, and ensemble summarization. ","accessible_pdf":false,"authors":[{"affiliations":["University of Utah, SALT LAKE CITY, United States"],"email":"lyuweiran@gmail.com","is_corresponding":true,"name":"Weiran Lyu"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"g.s.raghavendra@gmail.com","is_corresponding":false,"name":"Raghavendra Sridharamurthy"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"jeffp@cs.utah.edu","is_corresponding":false,"name":"Jeff M. Phillips"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"wang.bei@gmail.com","is_corresponding":false,"name":"Bei Wang"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1803","image_caption":"An overview of our pipeline is shown in the representative image. Given a set of scalar fields as input, we first simplify each scalar field using a small persistence threshold to remove noise from the data. We then compute the corresponding merge tree with labeling. These merge trees are subsequently used to generate signatures using either the RMH or subpath signature algorithms. Locality-sensitive hashing (LSH) is employed to divide the signatures into bands and rows. 
Finally, for empirical comparison, we generate distance matrices by collecting similar pairs from the LSH.","keywords":["Merge trees, locality sensitive hashing, comparative analysis, topological data analysis, scientific visualization"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Pd3W5-EJRVg&t=0h14m46s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1803/v-full-1803_Preview.mp4?token=bTKTc2VQvpQs8YUA6AesgADr3vNA7g6xS8QX_9lFKj4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1803/v-full-1803_Preview.srt?token=YJGJgzwZGd5-Z0ayemd9xf2RokZnwKSp5oU17mupfg0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Topological Data Analysis","session_uid":"v-full","session_youtube_ff_id":"77lGpXvrG0k","session_youtube_ff_link":"https://youtu.be/77lGpXvrG0k","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Pd3W5-EJRVg&t=0h14m46s","sessions":["Topological Data Analysis"],"time_stamp":"2024-10-17T14:27:00Z","title":"Fast Comparative Analysis of Merge Trees Using Locality-Sensitive Hashing","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1188","abstract":"Vortices and their analysis play a critical role in the understanding of complex phenomena in turbulent flows. Traditional vortex extraction methods, notably region-based techniques, often overlook the entanglement phenomenon, resulting in the inclusion of multiple vortices within a single extracted region. Their separation is necessary for quantifying different types of vortices and their statistics. In this study, we propose a novel vortex separation method that extends the conventional contour tree-based segmentation approach with an additional step termed \u201clayering\u201d. Upon extracting a vortical region using specified vortex criteria (e.g., \u03bb2), we initially establish topological segmentation based on the contour tree, followed by the layering process to allocate appropriate segmentation IDs to unsegmented cells, thus separating individual vortices within the region. However, these regions may still suffer from inaccurate splits, which we address statistically by leveraging the continuity of vorticity lines across the split boundaries. 
Our findings demonstrate a significant improvement in both the separation of vortices and the mitigation of inaccurate splits compared to prior methods.","accessible_pdf":false,"authors":[{"affiliations":["University of Houston, Houston, United States"],"email":"adeelz92@gmail.com","is_corresponding":true,"name":"Adeel Zafar"},{"affiliations":["University of Houston, Houston, United States"],"email":"zpoorsha@cougarnet.uh.edu","is_corresponding":false,"name":"Zahra Poorshayegh"},{"affiliations":["University of Houston, Houston, United States"],"email":"diyang@uh.edu","is_corresponding":false,"name":"Di Yang"},{"affiliations":["University of Houston, Houston, United States"],"email":"chengu@cs.uh.edu","is_corresponding":false,"name":"Guoning Chen"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1188","image_caption":"This figure illustrates the steps of the proposed topological separation method. (a) shows a vortical region extracted using a specific value of \u03bb2, along with the critical points of the minimal join tree. (b) displays the contour tree-based segmentation of the region using the extracted minimal join tree. (c) depicts the use of \u201clayering\u201d to assign appropriate segmentation IDs to the segment (red) associated with the maximum. (d) shows the region being separated into exactly two vortices (green and blue). (e) illustrates the process of ensuring the validity of the split by computing the vorticity lines in the vicinity of the split.","keywords":["Fluid flow, vortices, vortex topology"],"open_access_supplemental_link":"https://arxiv.org/src/2407.03384v1/anc/supp_doc.pdf","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2407.03384","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Pd3W5-EJRVg&t=1h4m31s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1188/v-short-1188_Preview.mp4?token=sgv_irFDfgo2UYspqvHcN4nevlgpkBM3_ckeCMVGAiU&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1188/v-short-1188_Preview.srt?token=SYeVB_zKh2OueId1Sa6sV8jHchqiMdi7Sb3LdtGw9Yk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Topological Data Analysis","session_uid":"v-short","session_youtube_ff_id":"fzAYRuAZbwA","session_youtube_ff_link":"https://youtu.be/fzAYRuAZbwA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Pd3W5-EJRVg&t=1h4m31s","sessions":["Topological Data Analysis"],"time_stamp":"2024-10-17T15:15:00Z","title":"Topological Separation of Vortices","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233330262","abstract":"This paper presents a computational framework for the concise encoding of an ensemble of persistence diagrams, in the form of weighted Wasserstein barycenters [100], [102] of a dictionary of atom diagrams. We introduce a multi-scale gradient descent approach for the efficient resolution of the corresponding minimization problem, which interleaves the optimization of the barycenter weights with the optimization of the atom diagrams. 
Our approach leverages the analytic expressions for the gradient of both sub-problems to ensure fast iterations, and it additionally exploits shared-memory parallelism. Extensive experiments on public ensembles demonstrate the efficiency of our approach, with Wasserstein dictionary computations in the orders of minutes for the largest examples. We show the utility of our contributions in two applications. First, we apply Wasserstein dictionaries to data reduction and reliably compress persistence diagrams by concisely representing them with their weights in the dictionary. Second, we present a dimensionality reduction framework based on a Wasserstein dictionary defined with a small number of atoms (typically three) and encode the dictionary as a low dimensional simplex embedded in a visual space (typically in 2D). In both applications, quantitative experiments assess the relevance of our framework. Finally, we provide a C++ implementation that can be used to reproduce our results.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Keanu Sisouk"},{"affiliations":"","email":"","is_corresponding":false,"name":"Julie Delon"},{"affiliations":"","email":"","is_corresponding":false,"name":"Julien Tierny"}],"award":"","doi":"10.1109/TVCG.2023.3330262","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233330262","image_caption":"Visual comparison (left) between the input persistence diagrams for three members of an initial ensemble (one member per ground-truth cluster class). For each member, the sphere color encodes the correspondence between the input and the compressed diagrams. This visual comparison shows that the main features of the diagrams are well preserved by our reduction approach, for which a low relative reconstruction error can be observed. 
The planar overview of the ensemble (right) generated by our dimensionality reduction enables the visualization of the relations between the different diagrams of the ensemble.","keywords":["Topological data analysis, ensemble data, persistence diagrams"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2304.14852","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Pd3W5-EJRVg&t=0h40m6s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233330262/v-tvcg-20233330262_Preview.mp4?token=5OxVTy6viqjBpmHEOIm4vvAGI8HOoHWyPgysuZR-PWk&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Topological Data Analysis","session_uid":"v-tvcg","session_youtube_ff_id":"h_qmhmjYFFs","session_youtube_ff_link":"https://youtu.be/h_qmhmjYFFs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Pd3W5-EJRVg&t=0h40m6s","sessions":["Topological Data Analysis"],"time_stamp":"2024-10-17T14:51:00Z","title":"Wasserstein Dictionaries of Persistence Diagrams","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233334755","abstract":"This paper presents a computational framework for the Wasserstein auto-encoding of merge trees (MT-WAE), a novel extension of the classical auto-encoder neural network architecture to the Wasserstein metric space of merge trees. In contrast to traditional auto-encoders which operate on vectorized data, our formulation explicitly manipulates merge trees on their associated metric space at each layer of the network, resulting in superior accuracy and interpretability. Our novel neural network approach can be interpreted as a non-linear generalization of previous linear attempts [79] at merge tree encoding. It also trivially extends to persistence diagrams. Extensive experiments on public ensembles demonstrate the efficiency of our algorithms, with MT-WAE computations in the orders of minutes on average. We show the utility of our contributions in two applications adapted from previous work on merge tree encoding [79]. First, we apply MT-WAE to merge tree compression, by concisely representing them with their coordinates in the final layer of our auto-encoder. Second, we document an application to dimensionality reduction, by exploiting the latent space of our auto-encoder, for the visual analysis of ensemble data. We illustrate the versatility of our framework by introducing two penalty terms, to help preserve in the latent space both the Wasserstein distances between merge trees, as well as their clusters. In both applications, quantitative experiments assess the relevance of our framework. 
Finally, we provide a C++ implementation that can be used for reproducibility.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Mathieu Pont"},{"affiliations":"","email":"","is_corresponding":true,"name":"Julien Tierny"}],"award":"","doi":"10.1109/TVCG.2023.3334755","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233334755","image_caption":"Visual analysis of the Earthquake ensemble ((a) each ground-truth class is represented by one of its members), with our Wasserstein Auto-Encoder of Merge Trees (MT-WAE). We apply our contributions to merge tree compression ((b), right) by simply storing their coordinates in the last decoding layer of our network. We exploit the latent space of our network to generate 2D layouts of the ensemble (c). The reconstruction of user-defined locations ((c) and (d), purple) enables an interactive exploration of the latent space. MT-WAE also supports persistence correlation views (e), which reveal the persistent features which exhibit the most variability in the ensemble. ","keywords":["Topological data analysis, ensemble data, persistence diagrams, merge trees, auto-encoders, neural networks"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Pd3W5-EJRVg&t=0h52m8s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233334755/v-tvcg-20233334755_Preview.mp4?token=NCOxCxgJ-2fZg33e5WgixMfQD43MuXdvBkAm4BXwzrI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233334755/v-tvcg-20233334755_Preview.srt?token=SGT5s_ZQcXHR43diaNIsh61J_pojn88sDLy60lEsaso&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full5","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Topological Data Analysis","session_uid":"v-tvcg","session_youtube_ff_id":"jop4MUY9KDE","session_youtube_ff_link":"https://youtu.be/jop4MUY9KDE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Pd3W5-EJRVg&t=0h52m8s","sessions":["Topological Data Analysis"],"time_stamp":"2024-10-17T15:03:00Z","title":"Wasserstein Auto-Encoders of Merge Trees (and Persistence Diagrams)","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1039","abstract":"Propagation analysis refers to studying how information spreads on social media, a pivotal endeavor for understanding social sentiment and public opinions. Numerous studies contribute to visualizing information spread, but few have considered the implicit and complex diffusion patterns among multiple platforms. To bridge the gap, we summarize cross-platform diffusion patterns with experts and identify significant factors that dissect the mechanisms of cross-platform information spread. Based on that, we propose an information diffusion model that estimates the likelihood of a topic/post spreading among different social media platforms. Moreover, we propose a novel visual metaphor that encapsulates cross-platform propagation in a manner analogous to the spread of seeds across gardens. 
Specifically, we visualize platforms, posts, implicit cross-platform routes, and salient instances as elements of a virtual ecosystem \u2014 gardens, flowers, winds, and seeds, respectively. We further develop a visual analytic system, namely BloomWind, that enables users to quickly identify the cross-platform diffusion patterns and investigate the relevant social media posts. Ultimately, we demonstrate the usage of BloomWind through two case studies and validate its effectiveness using expert interviews.","accessible_pdf":false,"authors":[{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"940662579@qq.com","is_corresponding":true,"name":"Jianing Yin"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"hzjia@zju.edu.cn","is_corresponding":false,"name":"Hanze Jia"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"zhoubuwei@zju.edu.cn","is_corresponding":false,"name":"Buwei Zhou"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"tangtan@zju.edu.cn","is_corresponding":false,"name":"Tan Tang"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"yingluu@zju.edu.cn","is_corresponding":false,"name":"Lu Ying"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"sn_ye@zju.edu.cn","is_corresponding":false,"name":"Shuainan Ye"},{"affiliations":["Michigan State University, East Lansing, United States"],"email":"pengtaiq@msu.edu","is_corresponding":false,"name":"Tai-Quan Peng"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"ycwu@zju.edu.cn","is_corresponding":false,"name":"Yingcai Wu"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1039","image_caption":"Interface Overview of BloomWind (Cluster-level): (a) Cluster-level Propagation View, demonstrating the diffusion process of topics among platforms; (b) Timeline View, for selecting a time frame and controlling the animation process of propagation; (c) Cluster-level Detail View, listing the post and user information by topic and platform.","keywords":["Propagation analysis, social media visualization, cross-platform propagation, metaphor design"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/7Y2cPfXGiAY&t=0h54m24s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1039/v-full-1039_Preview.mp4?token=RMStTCRgaLgf8wuswgS8B2S_P9JX_UtU18dAZAvK_6I&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1039/v-full-1039_Preview.srt?token=gd0jO6EgrCVsS41Ukgf-sCEr_i-Rbk7cSV5HtnvGPuw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full6","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"Motion and Animated Notions","session_uid":"v-full","session_youtube_ff_id":"orsYGZt1cWI","session_youtube_ff_link":"https://youtu.be/orsYGZt1cWI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/7Y2cPfXGiAY&t=0h54m24s","sessions":["Motion and Animated Notions"],"time_stamp":"2024-10-17T18:33:00Z","title":"Blowing Seeds Across Gardens: Visualizing Implicit Propagation of Cross-Platform Social Media 
Posts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1325","abstract":"Dynamic data visualizations can convey large amounts of information over time, such as using motion to depict changes in data values for multiple entities. Such dynamic displays put a demand on our visual processing capacities, yet our perception of motion is limited. Several techniques have been shown to improve the processing of dynamic displays. Staging the animation to sequentially show steps in a transition and tracing object movement by displaying trajectory histories can improve processing by reducing the cognitive load. In this paper, We examine the effectiveness of staging and tracing in dynamic displays. We showed participants animated line charts depicting the movements of lines and asked them to identify the line with the highest mean and variance. We manipulated the animation to display the lines with or without staging, tracing and history, and compared the results to a static chart as a control. Results showed that tracing and staging are preferred by participants, and improve their performance in mean and variance tasks respectively. They also preferred display time 3 times shorter when staging is used. Also, encoding animation speed with mean and variance in congruent tasks is associated with higher accuracy. These findings help inform real-world best practices for building dynamic displays. The supplementary materials can be found at https://osf.io/8c95v/","accessible_pdf":false,"authors":[{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"shu343@gatech.edu","is_corresponding":true,"name":"Songwen Hu"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"ouxunjiang@u.northwestern.edu","is_corresponding":false,"name":"Ouxun Jiang"},{"affiliations":["Dolby Laboratories Inc., San Francisco, United States"],"email":"jcr@dolby.com","is_corresponding":false,"name":"Jeffrey Riedmiller"},{"affiliations":["Georgia Tech, Atlanta, United States","University of Massachusetts Amherst, Amherst, United States"],"email":"cxiong@gatech.edu","is_corresponding":false,"name":"Cindy Xiong Bearfield"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1325","image_caption":"Examples of different animation design options. 
The animations are arranged in a time sequence from top to bottom and categorized into six conditions from left to right.","keywords":["Animation, Dynamic Displays, Perception, Motion, Analytic Tasks"],"open_access_supplemental_link":"https://osf.io/8c95v/","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/7Y2cPfXGiAY&t=0h44m13s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1325/v-full-1325_Preview.mp4?token=znV_u6YW7G-s5ppYwiYo4wWNrKPrsIIJxfTiurV5PC8&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full6","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"Motion and Animated Notions","session_uid":"v-full","session_youtube_ff_id":"pY3yFbMe5RE","session_youtube_ff_link":"https://youtu.be/pY3yFbMe5RE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/7Y2cPfXGiAY&t=0h44m13s","sessions":["Motion and Animated Notions"],"time_stamp":"2024-10-17T17:45:00Z","title":"Motion-Based Visual Encoding Can Improve Performance on Perceptual Tasks with Dynamic Time Series","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1451","abstract":"We present a systematic review, an empirical study, and a first set of considerations for designing visualizations in motion, derived from a concrete scenario in which these visualizations were used to support a primary task. In practice, when viewers are confronted with embedded visualizations, they often have to focus on a primary task and can only quickly glance at a visualization showing rich, often dynamically updated, information. As such, the visualizations must be designed so as not to distract from the primary task, while at the same time being readable and useful for aiding the primary task. For example, in games, players who are engaged in a battle have to look at their enemies but also read the remaining health of their own game character from the health bar over their character's head. Many trade-offs are possible in the design of embedded visualizations in such dynamic scenarios, which we explore in-depth in this paper with a focus on user experience. We use video games as an example of an application context with a rich existing set of visualizations in motion. We begin our work with a systematic review of in-game visualizations in motion. Next, we conduct an empirical user study to investigate how different embedded visualizations in motion designs impact user experience. We conclude with a set of considerations and trade-offs for designing visualizations in motion more broadly as derived from what we learned about video games. 
All supplemental materials of this paper are available at osf.io/3v8wm/.","accessible_pdf":true,"authors":[{"affiliations":["Xi'an Jiaotong-Liverpool University, Suzhou, China","Universit\u00e9 Paris-Saclay, CNRS, Inria, Gif-sur-Yvette, France"],"email":"yaolijie0219@gmail.com","is_corresponding":true,"name":"Lijie Yao"},{"affiliations":["Univerisit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"federicabucchieri@gmail.com","is_corresponding":false,"name":"Federica Bucchieri"},{"affiliations":["Carleton University, Ottawa, Canada"],"email":"dieselfish@gmail.com","is_corresponding":false,"name":"Victoria McArthur"},{"affiliations":["LISN, Universit\u00e9 Paris-Saclay, CNRS, INRIA, Orsay, France"],"email":"anastasia.bezerianos@universite-paris-saclay.fr","is_corresponding":false,"name":"Anastasia Bezerianos"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"petra.isenberg@inria.fr","is_corresponding":false,"name":"Petra Isenberg"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1451","image_caption":"Three situated visualizations tested in our game RobotLife: Left - a horizontal bar chart positioned outside of the game enemy character, Center - a vertical bar chart integrated in the texture of the game enemy character, and Right - a circular bar chart (donut chart) partially match to the design of game enemy character.","keywords":["Situated visualization, visualization in motion, design considerations"],"open_access_supplemental_link":"https://osf.io/3v8wm/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"http://arxiv.org/abs/2408.01991","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/7Y2cPfXGiAY&t=0h20m37s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1451/v-full-1451_Preview.mp4?token=vn2MF9S6BIWPnnQW9jdGn6mw5twlPZQJCJeUv_PSqLI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1451/v-full-1451_Preview.srt?token=3ZLLU9lFYcU3KI9gdD-fMPf0fksfp4GKOwUShVB7RhM&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full6","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"Motion and Animated Notions","session_uid":"v-full","session_youtube_ff_id":"X9GOtQyXfx8","session_youtube_ff_link":"https://youtu.be/X9GOtQyXfx8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/7Y2cPfXGiAY&t=0h20m37s","sessions":["Motion and Animated Notions"],"time_stamp":"2024-10-17T18:09:00Z","title":"User Experience of Visualizations in Motion: A Case Study and Design Considerations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1192","abstract":"Narrative visualization has become a crucial tool in data presentation, merging storytelling with data visualization to convey complex information in an engaging and accessible manner. In this study, we review the design space for narrative visualizations, focusing on animation style, through a comprehensive analysis of 80 papers from key visualization venues. 
We categorize these papers into six broad themes: Animation Style, Interactivity, Technology Usage, Methodology Development, Evaluation Type, and Application Domain. Our findings reveal a significant evolution in the field, marked by a growing preference for animated and non-interactive techniques. This trend reflects a shift towards minimizing user interaction while enhancing the clarity and impact of data presentation. We also identified key trends and technologies shaping the field, highlighting the role of technologies, such as machine learning in driving these changes. We offer insights into the dynamic interrelations within the narrative visualization domains, and suggest future research directions, including exploring non-interactive techniques, examining the interplay between different visualization elements, and developing domain-specific visualizations.","accessible_pdf":false,"authors":[{"affiliations":["Louisiana State University, Baton Rouge, United States"],"email":"jyang44@lsu.edu","is_corresponding":true,"name":"Vyri Junhan Yang"},{"affiliations":["Louisiana State University, Baton Rouge, United States"],"email":"mjasim@lsu.edu","is_corresponding":false,"name":"Mahmood Jasim"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1192","image_caption":"We explore the design space of narrative visualization, focusing on animation styles. We categorize 80 papers from top visualization venues into six categories, including Animation Style, Interactivity, Methodology, Technology, Evaluation Type , and Application Domain. We discuss the interplay between different visualization techniques and elements and the trend to focus on domain-specific visualizations.","keywords":["Narrative visualizations, static and animated visualization, categorization, design space"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/7Y2cPfXGiAY&t=1h9m53s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1192/v-short-1192_Preview.mp4?token=wSAJAGdGX1p1H0svxN5jho_bPjtmkIq3M03qDyIOPPM&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1192/v-short-1192_Preview.srt?token=XE-6-nE7biDRyNXm7vjOIvCIUqitl1KrzsCI4ykg1rk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full6","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"Motion and Animated Notions","session_uid":"v-short","session_youtube_ff_id":"6oCqQbTXScg","session_youtube_ff_link":"https://youtu.be/6oCqQbTXScg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/7Y2cPfXGiAY&t=1h9m53s","sessions":["Motion and Animated Notions"],"time_stamp":"2024-10-17T18:45:00Z","title":"Animating the Narrative: A Review of Animation Styles in Narrative Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20223193756","abstract":"Information visualization uses various types of representations to encode data into graphical formats. 
Prior work on visualization techniques has evaluated the accuracy of perceived numerical data values from visual data encodings such as graphical position, length, orientation, size, and color. Our work aims to extend the research of graphical perception to the use of motion as data encodings for quantitative values. We present two experiments implementing multiple fundamental aspects of motion such as type, speed, and synchronicity that can be used for numerical value encoding as well as comparing motion to static visual encodings in terms of user perception and accuracy. We studied how well users can assess the differences between several types of motion and static visual encodings and present an updated ranking of accuracy for quantitative judgments. Our results indicate that non-synchronized motion can be interpreted more quickly and more accurately than synchronized motion. Moreover, our ranking of static and motion visual representations shows that motion, especially expansion and translational types, has great potential as a data encoding technique for quantitative value. Finally, we discuss the implications for the use of animation and motion for numerical representations in data visualization.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Shaghayegh Esmaeili"},{"affiliations":"","email":"","is_corresponding":false,"name":"Samia Kabir"},{"affiliations":"","email":"","is_corresponding":false,"name":"Anthony M. Colas"},{"affiliations":"","email":"","is_corresponding":false,"name":"Rhema P. Linder"},{"affiliations":"","email":"","is_corresponding":false,"name":"Eric D. Ragan"}],"award":"","doi":"10.1109/TVCG.2022.3193756","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20223193756","image_caption":"This preview image compares static and motion-based data encoding techniques for quantitative values. The top row shows static encodings, including area, color, angle, position, and length. The bottom row illustrates dynamic motion encodings: expansion, vibration, flicker, and vertical motion. Arrows indicate the direction of movement, emphasizing the dynamic nature of these motion-based visualizations. The image highlights how different visual properties--both static and motion-based--can be used for graphical perception and accuracy in data interpretation. 
","keywords":["Information visualization, animation and motion-related techniques, empirical study, graphical perception, evaluation."],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/7Y2cPfXGiAY&t=0h1m15s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20223193756/v-tvcg-20223193756_Preview.mp4?token=rnzL2n9Tqn4SX1r7OWdTjWOrzDRa2-ZKCQOPcHox-B4&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full6","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"Motion and Animated Notions","session_uid":"v-tvcg","session_youtube_ff_id":"xUeevjCLhns","session_youtube_ff_link":"https://youtu.be/xUeevjCLhns","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/7Y2cPfXGiAY&t=0h1m15s","sessions":["Motion and Animated Notions"],"time_stamp":"2024-10-17T17:57:00Z","title":"Evaluating Graphical Perception of Visual Motion for Quantitative Data Encoding","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233341990","abstract":"We report on challenges and considerations for supporting design processes for visualizations in motion embedded in sports videos. We derive our insights from analyzing swimming race visualizations and motion-related data, building a technology probe, as well as a study with designers. Understanding how to design situated visualizations in motion is important for a variety of contexts. Competitive sports coverage, in particular, increasingly includes information on athlete or team statistics and records. Although moving visual representations attached to athletes or other targets are starting to appear, systematic investigations on how to best support their design process in the context of sports videos are still missing. Our work makes several contributions in identifying opportunities for visualizations to be added to swimming competition coverage but, most importantly, in identifying requirements and challenges for designing situated visualizations in motion. Our investigations include the analysis of a survey with swimming enthusiasts on their motion-related information needs, an ideation workshop to collect designs and elicit design challenges, the design of a technology probe that allows to create embedded visualizations in motion based on real data, and an evaluation with visualization designers that aimed to understand the benefits of designing directly on videos.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Lijie Yao"},{"affiliations":"","email":"","is_corresponding":false,"name":"Romain Vuillemot"},{"affiliations":"","email":"","is_corresponding":false,"name":"Anastasia Bezerianos"},{"affiliations":"","email":"","is_corresponding":false,"name":"Petra Isenberg"}],"award":"","doi":"10.1109/TVCG.2023.3341990","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233341990","image_caption":"Embedded representations added to a swimming video of the 2021 French Championship using our technology probe. 
These show dynamically updating visualizations that move with the swimmers: distance to the leader and predicted winner (left), speed distance to a personal record (top right), and current speed and swimmers' ages (bottom right). The left and bottom right images also show stationary embedded representations of the swimmers' names, nationality, and elapsed time.","keywords":["Data visualization, Sports, Videos, Probes, Surveys, Authoring systems, Games, Design framework, Embedded visualization, Sports analytics, Visualization in motion"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/7Y2cPfXGiAY&t=0h33m31s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233341990/v-tvcg-20233341990_Preview.mp4?token=UhovQpwtoTs04VevSR-D-3grQfz_P_1fjxzRIrcoJvg&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233341990/v-tvcg-20233341990_Preview.srt?token=06mGz18pkPvSVcT0qmxfzeab_bc7Yvju_kHMUBDGMso&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full6","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"Motion and Animated Notions","session_uid":"v-tvcg","session_youtube_ff_id":"lFf8sM52rMc","session_youtube_ff_link":"https://youtu.be/lFf8sM52rMc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/7Y2cPfXGiAY&t=0h33m31s","sessions":["Motion and Animated Notions"],"time_stamp":"2024-10-17T18:21:00Z","title":"Designing for Visualization in Motion: Embedding Visualizations in Swimming Videos","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1272","abstract":"In various scientific and industrial domains, analyzing multivariate spatial data, i.e., vectors associated with spatial locations, is common practice. To analyze those datasets, analysts may turn to methods such as Spatial Blind Source Separation (SBSS). Designed explicitly for spatial data analysis, SBSS finds latent components in the dataset and is superior to popular non-spatial methods, like PCA. However, when analysts try different tuning parameter settings, the amount of latent components complicates analytical tasks. Based on our years-long collaboration with SBSS researchers, we propose a visualization approach to tackle this challenge. The main component is UnDRground Tubes (UT), a general-purpose idiom combining ideas from set visualization and multidimensional projections. We describe the UT visualization pipeline and integrate UT into an interactive multiple-view system. We demonstrate its effectiveness through interviews with SBSS experts, a qualitative evaluation with visualization experts, and computational experiments. SBSS experts were excited about our approach. They saw many benefits for their work and potential applications for geostatistical data analysis more generally. UT was also well received by visualization experts. 
Our benchmarks show that UT projections and its heuristics are appropriate.","accessible_pdf":false,"authors":[{"affiliations":["TU Wien, Vienna, Austria"],"email":"nikolaus.piccolotto@tuwien.ac.at","is_corresponding":false,"name":"Nikolaus Piccolotto"},{"affiliations":["TU Wien, Vienna, Austria"],"email":"mwallinger@ac.tuwien.ac.at","is_corresponding":true,"name":"Markus Wallinger"},{"affiliations":["Institute of Visual Computing and Human-Centered Technology, Vienna, Austria"],"email":"miksch@ifs.tuwien.ac.at","is_corresponding":false,"name":"Silvia Miksch"},{"affiliations":["TU Wien, Vienna, Austria"],"email":"markus.boegl@tuwien.ac.at","is_corresponding":false,"name":"Markus B\u00f6gl"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1272","image_caption":"The main component of our visualization approach is UnDRground Tubes, which presents glyphs in a grid and connects them by lines according to their set memberships. ","keywords":["Geographical data, multivariate data, set visualization, visual cluster analysis."],"open_access_supplemental_link":"https://osf.io/c7yga/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/zgphx","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/oBQXVSnxy5g&t=0h1m3s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1272/v-full-1272_Preview.mp4?token=v8VZzcOwmzW0CadxHJpyhrgdUUg2hMCG4nfe7dEqYWw&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1272/v-full-1272_Preview.srt?token=dukv0aGEH_NoG8urQc9zM3j2VN3RPsA_FK9_KDg_39w&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Dimensionality Reduction","session_uid":"v-full","session_youtube_ff_id":"JAizrYjsDB8","session_youtube_ff_link":"https://youtu.be/JAizrYjsDB8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/oBQXVSnxy5g&t=0h1m3s","sessions":["Dimensionality Reduction"],"time_stamp":"2024-10-16T14:15:00Z","title":"UnDRground Tubes: Exploring Spatial Data With Multidimensional Projections and Set Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1568","abstract":"Dimensionality reduction techniques are widely used for visualizing high-dimensional data. However, support for interpreting patterns of dimension reduction results in the context of the original data space is often insufficient. Consequently, users may struggle to extract insights from the projections. In this paper, we introduce DimBridge, a visual analytics tool that allows users to interact with visual patterns in a projection and retrieve corresponding data patterns. DimBridge supports several interactions, allowing users to perform various analyses, from contrasting multiple clusters to explaining complex latent structures. Leveraging first-order predicate logic, DimBridge identifies subspaces in the original dimensions relevant to a queried pattern and provides an interface for users to visualize and interact with them. 
We demonstrate how DimBridge can help users overcome the challenges associated with interpreting visual patterns in projections.","accessible_pdf":false,"authors":[{"affiliations":["Tufts University, Medford, United States"],"email":"brianmontambault@gmail.com","is_corresponding":true,"name":"Brian Montambault"},{"affiliations":["Tufts University, Medford, United States"],"email":"gabriel.appleby@gmail.com","is_corresponding":false,"name":"Gabriel Appleby"},{"affiliations":["Tufts University, Boston, United States"],"email":"jen@cs.tufts.edu","is_corresponding":false,"name":"Jen Rogers"},{"affiliations":["Tufts University, Medford, United States"],"email":"camelia_daniela.brumar@tufts.edu","is_corresponding":false,"name":"Camelia D. Brumar"},{"affiliations":["Vanderbilt University, Nashville, United States"],"email":"mingwei.li@tufts.edu","is_corresponding":false,"name":"Mingwei Li"},{"affiliations":["Tufts University, Medford, United States"],"email":"remco@cs.tufts.edu","is_corresponding":false,"name":"Remco Chang"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1568","image_caption":"DimBridge helps users understand visual patterns in dimensionality reduction-based 2D projections by identifying relevant subsets of the high-dimensional space.","keywords":["Predicates, Dimensionality Reduction, Explainable Machine Learning"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/oBQXVSnxy5g&t=0h27m3s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1568/v-full-1568_Preview.mp4?token=4rHbL9zypOPcv4QmTnYnprbl4RS8YStj6BPAK4rizBA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1568/v-full-1568_Preview.srt?token=SOI8J1E388R3ADgh6IACzWOJhwfdCSQSGl8g139fTR0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Dimensionality Reduction","session_uid":"v-full","session_youtube_ff_id":"tH3ik7KCn0A","session_youtube_ff_link":"https://youtu.be/tH3ik7KCn0A","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/oBQXVSnxy5g&t=0h27m3s","sessions":["Dimensionality Reduction"],"time_stamp":"2024-10-16T14:39:00Z","title":"DimBridge: Interactive Explanation of Visual Patterns in Dimensionality Reductions with Predicate Logic","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1612","abstract":"Partitionings (or segmentations) divide a given domain into disjoint connected regions whose union forms again the entire domain. Multi-dimensional partitionings occur, for example, when analyzing parameter spaces of simulation models, where each segment of the partitioning represents a region of similar model behavior. Having computed a partitioning, one is commonly interested in understanding how large the segments are and which segments lie next to each other. While visual representations of 2D domain partitionings that reveal sizes and neighborhoods are straightforward, this is no longer the case when considering multi-dimensional domains of three or more dimensions. 
We propose an algorithm for computing 2D embeddings of multi-dimensional partitionings. The embedding shall have the following properties: It shall maintain the topology of the partitioning and optimize the area sizes and joint boundary lengths of the embedded segments to match the respective sizes and lengths in the multi-dimensional domain. We demonstrate the effectiveness of our approach by applying it to different use cases, including the visual exploration of 3D spatial domain segmentations and multi-dimensional parameter space partitionings of simulation ensembles. We numerically evaluate our algorithm with respect to how well sizes and lengths are preserved depending on the dimensionality of the domain and the number of segments. ","accessible_pdf":true,"authors":[{"affiliations":["University of M\u00fcnster, M\u00fcnster, Germany"],"email":"m_ever14@uni-muenster.de","is_corresponding":true,"name":"Marina Evers"},{"affiliations":["University of M\u00fcnster, M\u00fcnster, Germany"],"email":"linsen@uni-muenster.de","is_corresponding":false,"name":"Lars Linsen"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1612","image_caption":"We present an approach for visualizing a multi-dimensional partitioning in a 2D embedding. Each segment in the embedding corresponds to a multi-dimensional segment of the given partitioning. A multi-dimensional partitioning is modeled as a graph that is embedded into a 2D plane. The graph embedding is used as a starting point for a cellular automaton approach to compute a 2D embedding of the multi-dimensional embedding preserving topology, area, and boundary length. To its outcome, we apply a rendering that highlights relevant features.","keywords":["Multi-dimensional partitionings, segmentations, dimensionality reduction, parameter space visualization."],"open_access_supplemental_link":"https://github.com/marinaevers/segmentation-projection","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"http://arxiv.org/abs/2408.03641","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/oBQXVSnxy5g&t=0h38m37s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1612/v-full-1612_Preview.mp4?token=EUgVBfrye42pazKDBfY0V5t7uuwIUzDQgiF4rdtXwRw&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Dimensionality Reduction","session_uid":"v-full","session_youtube_ff_id":"91i3yDeIi38","session_youtube_ff_link":"https://youtu.be/91i3yDeIi38","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/oBQXVSnxy5g&t=0h38m37s","sessions":["Dimensionality Reduction"],"time_stamp":"2024-10-16T14:51:00Z","title":"2D Embeddings of Multi-dimensional Partitionings","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1632","abstract":"High-dimensional data, characterized by many features, can be difficult to visualize effectively. Dimensionality reduction techniques, such as PCA, UMAP, and t-SNE, address this challenge by projecting the data into a lower-dimensional space while preserving important relationships. 
TopoMap is another technique that excels at preserving the underlying structure of the data, leading to interpretable visualizations. In particular, TopoMap maps the high-dimensional data into a visual space, guaranteeing that the 0-dimensional persistence diagram of the Rips filtration of the visual space matches the one from the high-dimensional data. However, the original TopoMap algorithm can be slow and its layout can be too sparse for large and complex datasets. In this paper, we propose three improvements to TopoMap: 1) a more space-efficient layout, 2) a significantly faster implementation, and 3) a novel TreeMap-based representation that makes use of the topological hierarchy to aid the exploration of the projections. These advancements make TopoMap, now referred to as TopoMap++, a more powerful tool for visualizing high-dimensional data, which we demonstrate through different use case scenarios.","accessible_pdf":false,"authors":[{"affiliations":["New York University, New York City, United States"],"email":"vitoriaguardieiro@gmail.com","is_corresponding":true,"name":"Vitoria Guardieiro"},{"affiliations":["New York University, New York City, United States"],"email":"felipedeoliveira1407@gmail.com","is_corresponding":false,"name":"Felipe Inagaki de Oliveira"},{"affiliations":["Microsoft Research India, Bangalore, India"],"email":"harish.doraiswamy@microsoft.com","is_corresponding":false,"name":"Harish Doraiswamy"},{"affiliations":["University of Sao Paulo, Sao Carlos, Brazil"],"email":"gnonato@icmc.usp.br","is_corresponding":false,"name":"Luis Gustavo Nonato"},{"affiliations":["New York University, New York City, United States"],"email":"csilva@nyu.edu","is_corresponding":false,"name":"Claudio Silva"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1632","image_caption":"Representations of the MNIST database of handwritten digits. (a) This data is projected using TopoMap. (b) The hierarchy defined by the process of topological simplification is visualized as a TreeMap. Each leaf of this tree corresponds to the smallest simplified component with a user-defined minimum number of points. (c) The TopoMap++ representation of the same data where the eleven components selected by the TreeMap are highlighted. As can be seen, TopoMap++ makes much more efficient use of the space compared to TopoMap, thus allowing users to easily analyze the relationships between the different clusters. 
","keywords":["Topological data analysis, Computational topology, High-dimensional data, Projection."],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/oBQXVSnxy5g&t=1h3m43s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1632/v-full-1632_Preview.mp4?token=NL73IBsAgaCsJAwcWrdzocAOZp8ABJQsqvlxLuVM_aE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1632/v-full-1632_Preview.srt?token=nnuxA1POcFJtGJo3RfG9Z26Htjb_KbUYaTm9HT9mwH0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Dimensionality Reduction","session_uid":"v-full","session_youtube_ff_id":"RHAnJMEbOOQ","session_youtube_ff_link":"https://youtu.be/RHAnJMEbOOQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/oBQXVSnxy5g&t=1h3m43s","sessions":["Dimensionality Reduction"],"time_stamp":"2024-10-16T15:15:00Z","title":"TopoMap++: A faster and more space efficient technique to compute projections with topological guarantees","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233324851","abstract":"Dimensionality reduction (DR) algorithms are diverse and widely used for analyzing high-dimensional data. Various metrics and tools have been proposed to evaluate and interpret the DR results. However, most metrics and methods fail to be well generalized to measure any DR results from the perspective of original distribution fidelity or lack interactive exploration of DR results. There is still a need for more intuitive and quantitative analysis to interactively explore high-dimensional data and improve interpretability. We propose a metric and a generalized algorithm-agnostic approach based on the concept of capacity to evaluate and analyze the DR results. Based on our approach, we develop a visual analytic system HiLow for exploring high-dimensional data and projections. We also propose a mixed-initiative recommendation algorithm that assists users in interactively DR results manipulation. Users can compare the differences in data distribution after the interaction through HiLow. Furthermore, we propose a novel visualization design focusing on quantitative analysis of differences between high and low-dimensional data distributions. 
Finally, through user study and case studies, we validate the effectiveness of our approach and system in enhancing the interpretability of projections and analyzing the distribution of high and low-dimensional data.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":false,"name":"Yang Zhang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Jisheng Liu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Chufan Lai"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yuan Zhou"},{"affiliations":"","email":"","is_corresponding":true,"name":"Siming Chen"}],"award":"","doi":"10.1109/TVCG.2023.3324851","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233324851","image_caption":"Dimensionality reduction (DR) algorithms are diverse and widely used for analyzing high-dimensional data. We propose a metric and a generalized algorithm-agnostic approach based on the concept of capacity to evaluate and analyze the DR results. Based on our approach, we develop a visual analytic system HiLow for exploring high-dimensional data and projections. We also propose a mixed-initiative recommendation algorithm that assists users in interactively DR results manipulation. Users can compare the differences in data distribution after the interaction through HiLow. Furthermore, we propose a novel visualization design focusing on quantitative analysis of differences between high and low-dimensional data distributions. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/oBQXVSnxy5g&t=0h14m13s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233324851/v-tvcg-20233324851_Preview.mp4?token=DkmstbWOPNnXDNMazsNNAd6L4fkboZage8zf-mcmILc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233324851/v-tvcg-20233324851_Preview.srt?token=qwSdAz2hsKoC0m49tsZsR13eFd7B9mGqYLf3c4o_7MM&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Dimensionality Reduction","session_uid":"v-tvcg","session_youtube_ff_id":"q2ETleQA0KE","session_youtube_ff_link":"https://youtu.be/q2ETleQA0KE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/oBQXVSnxy5g&t=0h14m13s","sessions":["Dimensionality Reduction"],"time_stamp":"2024-10-16T14:27:00Z","title":"Interpreting High-Dimensional Projections With Capacity","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243364841","abstract":"The need to understand the structure of hierarchical or high-dimensional data is present in a variety of fields. Hyperbolic spaces have proven to be an important tool for embedding computations and analysis tasks as their non-linear nature lends itself well to tree or graph data. Subsequently, they have also been used in the visualization of high-dimensional data, where they exhibit increased embedding performance. However, none of the existing dimensionality reduction methods for embedding into hyperbolic spaces scale well with the size of the input data. 
That is because the embeddings are computed via iterative optimization schemes and the computation cost of every iteration is quadratic in the size of the input. Furthermore, due to the non-linear nature of hyperbolic spaces, Euclidean acceleration structures cannot directly be translated to the hyperbolic setting. This paper introduces the first acceleration structure for hyperbolic embeddings, building upon a polar quadtree. We compare our approach with existing methods and demonstrate that it computes embeddings of similar quality in significantly less time. Implementation and scripts for the experiments can be found at this https URL.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Martin Skrodzki"},{"affiliations":"","email":"","is_corresponding":false,"name":"Hunter van Geffen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Nicolas F. Chaves-de-Plaza"},{"affiliations":"","email":"","is_corresponding":false,"name":"Thomas H\u00f6llt"},{"affiliations":"","email":"","is_corresponding":false,"name":"Elmar Eisemann"},{"affiliations":"","email":"","is_corresponding":false,"name":"Klaus Hildebrandt"}],"award":"","doi":"10.1109/TVCG.2024.3364841","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243364841","image_caption":"An embedding of the C.Elegans data set with colored clusters on the right. Left shows an overlay of our tree acceleration structure. The red mark indicates the query point where the grid resolution is high, whereas it is low everywhere else in the embedding. This speeds up embedding computations significantly.","keywords":["Human-Computer Interaction (cs.HC); Artificial Intelligence (cs.AI); Machine Learning (cs.LG); Quantitative Methods (q-bio.QM); Machine Learning (stat.ML) Dimensionality reduction, t-SNE, hyperbolic embedding, acceleration structure"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/oBQXVSnxy5g&t=0h51m40s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243364841/v-tvcg-20243364841_Preview.mp4?token=OvZmub6iUHlYHZyKQa-ZeE_96Y2orscgxFdhA1RHjg4&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"Dimensionality Reduction","session_uid":"v-tvcg","session_youtube_ff_id":"QwwSaWLUn_c","session_youtube_ff_link":"https://youtu.be/QwwSaWLUn_c","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/oBQXVSnxy5g&t=0h51m40s","sessions":["Dimensionality Reduction"],"time_stamp":"2024-10-16T15:03:00Z","title":"Accelerating hyperbolic t-SNE","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1153","abstract":"Points of interest on a map such as restaurants, hotels, or subway stations, give rise to categorical point data: data that have a fixed location and one or more categorical attributes. Consequently, recent years have seen various set visualization approaches that visually connect points of the same category to support users in understanding the spatial distribution of categories. 
Existing methods use complex and often highly irregular shapes to connect points of the same category, leading to high cognitive load for the user. In this paper we introduce SimpleSets, which uses simple shapes to enclose categorical point patterns, thereby providing a clean overview of the data distribution. SimpleSets is designed to visualize sets of points with a single categorical attribute; as a result, the point patterns enclosed by SimpleSets form a partition of the data. We give formal definitions of point patterns that correspond to simple shapes and describe an algorithm that partitions categorical points into few such patterns. Our second contribution is a rendering algorithm that transforms a given partition into a clean set of shapes resulting in an aesthetically pleasing set visualization. Our algorithm pays particular attention to resolving intersections between nearby shapes in a consistent manner. We compare SimpleSets to the state-of-the-art set visualizations using standard datasets from the literature.","accessible_pdf":false,"authors":[{"affiliations":["TU Eindhoven, Eindhoven, Netherlands"],"email":"s.w.v.d.broek@tue.nl","is_corresponding":true,"name":"Steven van den Broek"},{"affiliations":["TU Eindhoven, Eindhoven, Netherlands"],"email":"w.meulemans@tue.nl","is_corresponding":false,"name":"Wouter Meulemans"},{"affiliations":["TU Eindhoven, Eindhoven, Netherlands"],"email":"b.speckmann@tue.nl","is_corresponding":false,"name":"Bettina Speckmann"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1153","image_caption":"A SimpleSets visualization of mills around Leeuwarden, The Netherlands. The mill types are: angular mill (blue); vertical wind engine (green); spider head mill (orange); and tjasker (purple). 
Data by https://molendatabase.nl with permission, map from https://www.openstreetmap.org/copyright.","keywords":["Set visualization, geographic visualization, algorithms"],"open_access_supplemental_link":"https://doi.org/10.5281/zenodo.12784670","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.14433","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/vNaxXisbG4Y&t=1h4m2s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1153/v-full-1153_Preview.mp4?token=Z6BFddSYmiI-39mV2cow1IHd-5BfUyXrkwS1rpLjwqs&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1153/v-full-1153_Preview.srt?token=29nzWoOd9pyyM4jTtZLCrsKC0TNf4f_WXUBk5eZN3cM&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full8","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Urban Planning, Construction, and Disaster Management","session_uid":"v-full","session_youtube_ff_id":"vZk9Sm6PIIo","session_youtube_ff_link":"https://youtu.be/vZk9Sm6PIIo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/vNaxXisbG4Y&t=1h4m2s","sessions":["Urban Planning, Construction, and Disaster Management"],"time_stamp":"2024-10-16T15:15:00Z","title":"SimpleSets: Capturing Categorical Point Patterns with Simple Shapes","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1307","abstract":"Building Information Modeling (BIM) describes a central data pool covering the entire life cycle of a construction project. Similarly, Building Energy Modeling (BEM) describes the process of using a 3D representation of a building as a basis for thermal simulations to assess the building\u2019s energy performance. This paper explores the intersection of BIM and BEM, focusing on the challenges and methodologies in converting BIM data into BEM representations for energy performance analysis. BEMTrace integrates 3D data wrangling techniques with visualization methodologies to enhance the accuracy and traceability of the BIM-to-BEM conversion process. Through parsing, error detection, and algorithmic correction of BIM data, our methods generate valid BEM models suitable for energy simulation. Visualization techniques provide transparent insights into the conversion process, aiding error identification, validation, and user comprehension. 
We introduce context-adaptive selections to facilitate user interaction and to show that the BEMTrace workflow helps users understand complex 3D data wrangling processes.","accessible_pdf":true,"authors":[{"affiliations":["VRVis Zentrum f\u00fcr Virtual Reality und Visualisierung Forschungs-GmbH, Vienna, Austria"],"email":"walch@vrvis.at","is_corresponding":true,"name":"Andreas Walch"},{"affiliations":["VRVis Zentrum f\u00fcr Virtual Reality und Visualisierung Forschungs-GmbH, Vienna, Austria"],"email":"szabo@vrvis.at","is_corresponding":false,"name":"Attila Szabo"},{"affiliations":["VRVis Zentrum f\u00fcr Virtual Reality und Visualisierung Forschungs-GmbH, Vienna, Austria"],"email":"hs@vrvis.at","is_corresponding":false,"name":"Harald Steinlechner"},{"affiliations":["Independent Researcher, Vienna, Austria"],"email":"thomas@ortner.fyi","is_corresponding":false,"name":"Thomas Ortner"},{"affiliations":["Institute of Visual Computing "," Human-Centered Technology, Vienna, Austria"],"email":"groeller@cg.tuwien.ac.at","is_corresponding":false,"name":"Eduard Gr\u00f6ller"},{"affiliations":["VRVis Zentrum f\u00fcr Virtual Reality und Visualisierung Forschungs-GmbH, Vienna, Austria"],"email":"johanna.schmidt@vrvis.at","is_corresponding":false,"name":"Johanna Schmidt"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1307","image_caption":"BEMTrace enhances the data curation process from a Building Information Model (BIM) to a Building Energy Model (BEM) by providing visual support for the BIM-to-BEM conversion. Users can access various views to better understand the complex data transformation, including the BIM World, BEM World, and the Relationship View, which illustrates the transition between them. Context-adaptive selections assist users in navigating these views, allowing for detailed exploration of different data aspects. 
This approach ensures a clearer understanding of the conversion process and helps in resolving any arising conflicts.","keywords":["BIM, BEM, BIM-to-BEM, 3D Data Wrangling, 3D selections, Visualization for trust building"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.19464","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/vNaxXisbG4Y&t=0h14m42s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1307/v-full-1307_Preview.mp4?token=lopxc4Sly_btDsX1Mo-dcgzS-HTbPGfRVSfPfSX0QbI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1307/v-full-1307_Preview.srt?token=jJddu6bRbUOyN7zSlfWx89aCEUqHMLLlhABaav6_M6w&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full8","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Urban Planning, Construction, and Disaster Management","session_uid":"v-full","session_youtube_ff_id":"AwIuPtpFz-k","session_youtube_ff_link":"https://youtu.be/AwIuPtpFz-k","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/vNaxXisbG4Y&t=0h14m42s","sessions":["Urban Planning, Construction, and Disaster Management"],"time_stamp":"2024-10-16T14:27:00Z","title":"BEMTrace: Visualization-driven approach for deriving Building Energy Models from BIM","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1681","abstract":"In recent years, the global adoption of electric vehicles (EVs) has surged, prompting a corresponding rise in the installation of charging stations. This proliferation has underscored the importance of expediting the deployment of charging infrastructure. Both academia and industry have thus devoted to addressing the charging station location problem (CSLP) to streamline this process. However, prevailing algorithms addressing CSLP are hampered by restrictive assumptions and computational overhead, leading to a dearth of comprehensive evaluations in the spatiotemporal dimensions. Consequently, their practical viability is restricted. Moreover, the placement of charging stations exerts a significant impact on both the road network and the power grid, which necessitates the evaluation of the potential post-deployment impacts on these interconnected networks holistically. In this study, we propose CSLens, a visual analytics system designed to inform charging station deployment decisions through the lens of coupled transportation and power networks. CSLens offers multiple visualizations and interactive features, empowering users to delve into the existing charging station layout, explore alternative deployment solutions, and assess the ensuring impact. To validate the efficacy of CSLens, we conducted two case studies and engaged in interviews with domain experts. Through these efforts, we substantiated the usability and practical utility of CSLens in enhancing the decision-making process surrounding charging station deployment. 
Our findings underscore CSLens\u2019s potential to serve as a valuable asset in navigating the complexities of charging infrastructure planning.","accessible_pdf":false,"authors":[{"affiliations":["Sun Yat-sen University, Shenzhen, China"],"email":"zhangyt85@mail2.sysu.edu.cn","is_corresponding":true,"name":"Yutian Zhang"},{"affiliations":["Sun Yat-sen University, Shenzhen, China"],"email":"xulw8@mail2.sysu.edu.cn","is_corresponding":false,"name":"Liwen Xu"},{"affiliations":["Sun Yat-sen University, Shenzhen, China"],"email":"taoshc@mail2.sysu.edu.cn","is_corresponding":false,"name":"Shaocong Tao"},{"affiliations":["Sun Yat-sen University, Shenzhen, China"],"email":"guanqx3@mail.sysu.edu.cn","is_corresponding":false,"name":"Quanxue Guan"},{"affiliations":["ShanghaiTech University, Shanghai, China"],"email":"liquan@shanghaitech.edu.cn","is_corresponding":false,"name":"Quan Li"},{"affiliations":["Sun Yat-sen University, Shenzhen, China"],"email":"zenghp5@mail.sysu.edu.cn","is_corresponding":false,"name":"Haipeng Zeng"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1681","image_caption":"CSLens facilitates the implementation of new charging stations within the coupled transportation and power networks. The Temporal Overview (A) analyzes the fluctuations in traffic hotspots and charging demand. In the Control Panel (B), users can adjust parameters to generate solutions for charging station deployment. The Charging Station Info (C) provides key attributes of charging stations. The Map View (D) furnishes detailed information on traffic volume, charging demand and charging stations. The Result View (E) and the Impact View (F) enable users to compare various solutions and evaluate their respective impacts on the road network and the power grid.","keywords":["Charging station location problem, Visual analytics, Decision-making"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/vNaxXisbG4Y&t=0h52m31s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1681/v-full-1681_Preview.mp4?token=3S6TVYPNim5-H1pVaXrfj9MMJ5GUV-fnBLAlWj5Cu9Y&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full8","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Urban Planning, Construction, and Disaster Management","session_uid":"v-full","session_youtube_ff_id":"qZcYIS995YE","session_youtube_ff_link":"https://youtu.be/qZcYIS995YE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/vNaxXisbG4Y&t=0h52m31s","sessions":["Urban Planning, Construction, and Disaster Management"],"time_stamp":"2024-10-16T15:03:00Z","title":"CSLens: Towards Better Deploying Charging Stations via Visual Analytics \u2014\u2014 A Coupled Networks Perspective","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233332511","abstract":"We present Submerse, an end-to-end framework for visualizing flooding scenarios on large and immersive display ecologies. 
Specifically, we reconstruct a surface mesh from input flood simulation data and generate a to-scale 3D virtual scene by incorporating geographical data such as terrain, textures, buildings, and additional scene objects. To optimize computation and memory performance for large simulation datasets, we discretize the data on an adaptive grid using dynamic quadtrees and support level-of-detail based rendering. Moreover, to provide a perception of flooding direction for a time instance, we animate the surface mesh by synthesizing water waves. As interaction is key for effective decision-making and analysis, we introduce two novel techniques for flood visualization in immersive systems: (1) an automatic scene-navigation method using optimal camera viewpoints generated for marked points-of-interest based on the display layout, and (2) an AR-based focus+context technique using an aux display system. Submerse is developed in collaboration between computer scientists and atmospheric scientists. We evaluate the effectiveness of our system and application by conducting workshops with emergency managers, domain experts, and concerned stakeholders in the Stony Brook Reality Deck, an immersive gigapixel facility, to visualize a superstorm flooding scenario in New York City.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Saeed Boorboor"},{"affiliations":"","email":"","is_corresponding":false,"name":"Yoonsang Kim"},{"affiliations":"","email":"","is_corresponding":false,"name":"Ping Hu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Josef Moses"},{"affiliations":"","email":"","is_corresponding":false,"name":"Brian Colle"},{"affiliations":"","email":"","is_corresponding":false,"name":"Arie E. Kaufman"}],"award":"","doi":"10.1109/TVCG.2023.3332511","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233332511","image_caption":"Submerse is an end-to-end framework for visualizing flooding scenarios on large and immersive display ecologies. It generates a to-scale 3D virtual scene by incorporating flood simulation data and geographical data such as terrain, textures, buildings, and additional scene objects. Submerse implements two novel techniques: (1) an automatic scene-navigation method using optimal camera viewpoints generated for marked points-of-interest based on the display layout, and (2) an AR-based focus+context technique using an aux display system. 
We demonstrate the system on the Stony Brook University Reality Deck.","keywords":["Camera navigation, flooding simulation visualization, immersive visualization, mixed reality"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/vNaxXisbG4Y&t=0h0m27s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233332511/v-tvcg-20233332511_Preview.mp4?token=x3Vbv2n6-cRii7JvFIx0dtFeY7hm1Yazmcnx8QrNYLo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233332511/v-tvcg-20233332511_Preview.srt?token=73so2ph6C7OcvEkc3GLhO-aehnd0Uv3yiXNlL1dR584&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full8","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Urban Planning, Construction, and Disaster Management","session_uid":"v-tvcg","session_youtube_ff_id":"CjTHaJsd0-8","session_youtube_ff_link":"https://youtu.be/CjTHaJsd0-8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/vNaxXisbG4Y&t=0h0m27s","sessions":["Urban Planning, Construction, and Disaster Management"],"time_stamp":"2024-10-16T14:15:00Z","title":"Submerse: Visualizing Storm Surge Flooding Simulations in Immersive Display Ecologies","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20233333356","abstract":"As urban populations grow, effectively accessing urban performance measures such as livability and comfort becomes increasingly important due to their significant socioeconomic impacts. While Point of Interest (POI) data has been utilized for various applications in location-based services, its potential for urban performance analytics remains unexplored. In this paper, we present SenseMap, a novel approach for analyzing urban performance by leveraging POI data as a semantic representation of urban functions. We quantify the contribution of POIs to different urban performance measures by calculating semantic textual similarities on our constructed corpus. We propose Semantic-adaptive Kernel Density Estimation which takes into account POIs\u2019 in\ufb02uential areas across different Traf\ufb01c Analysis Zones and semantic contributions to generate semantic density maps for measures. We design and implement a feature-rich, real-time visual analytics system for users to explore the urban performance of their surroundings. Evaluations with human judgment and reference data demonstrate the feasibility and validity of our method. Usage scenarios and user studies demonstrate the capability, usability, and explainability of our system.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Juntong Chen"},{"affiliations":"","email":"","is_corresponding":false,"name":"Qiaoyun Huang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Changbo Wang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Chenhui Li"}],"award":"","doi":"10.1109/TVCG.2023.3333356","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20233333356","image_caption":"The user interface of SenseMap: A. 
The map view in exploration and filter states, displaying semantic maps, circular query targets, and filtered regions; B. The navigation view, enabling adjustments to regional query parameters and navigation between POIs; C. The comparison view facilitates the comparison and analysis of measures across urban areas.","keywords":["Urban data, semantic textual similarity, point of interest, density map, visual analytics, visualization design"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/vNaxXisbG4Y&t=0h39m32s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233333356/v-tvcg-20233333356_Preview.mp4?token=sneOhA3BxgnybyDtxPqOZGsuH5Gc6aaiCekUKYQBXkE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20233333356/v-tvcg-20233333356_Preview.srt?token=EQI69AJ6rvTxPtwm3YQQlPjXgjGRLfZ88SnDz_dMKO0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full8","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Urban Planning, Construction, and Disaster Management","session_uid":"v-tvcg","session_youtube_ff_id":"S-OPwGCXsMo","session_youtube_ff_link":"https://youtu.be/S-OPwGCXsMo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/vNaxXisbG4Y&t=0h39m32s","sessions":["Urban Planning, Construction, and Disaster Management"],"time_stamp":"2024-10-16T14:51:00Z","title":"SenseMap: Urban Performance Visualization and Analytics via Semantic Textual Similarity","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243392587","abstract":"The issue of traffic congestion poses a significant obstacle to the development of global cities. One promising solution to tackle this problem is intelligent traffic signal control (TSC). Recently, TSC strategies leveraging reinforcement learning (RL) have garnered attention among researchers. However, the evaluation of these models has primarily relied on fixed metrics like reward and queue length. This limited evaluation approach provides only a narrow view of the model\u2019s decision-making process, impeding its practical implementation. Moreover, effective TSC necessitates coordinated actions across multiple intersections. Existing visual analysis solutions fall short when applied in multi-agent settings. In this study, we delve into the challenge of interpretability in multi-agent reinforcement learning (MARL), particularly within the context of TSC. We propose MARLens, a visual analytics system tailored to understand MARL-based TSC. Our system serves as a versatile platform for both RL and TSC researchers. It empowers them to explore the model\u2019s features from various perspectives, revealing its decision-making processes and shedding light on interactions among different agents. To facilitate quick identification of critical states, we have devised multiple visualization views, complemented by a traffic simulation module that allows users to replay specific training scenarios. To validate the utility of our proposed system, we present three comprehensive case studies, incorporate insights from domain experts through interviews, and conduct a user study. 
These collective efforts underscore the feasibility and effectiveness of MARLens in enhancing our understanding of MARL-based TSC systems and pave the way for more informed and efficient traffic management strategies.","accessible_pdf":true,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Yutian Zhang"},{"affiliations":"","email":"","is_corresponding":false,"name":"Guohong Zheng"},{"affiliations":"","email":"","is_corresponding":false,"name":"Zhiyuan Liu"},{"affiliations":"","email":"","is_corresponding":false,"name":"Quan Li"},{"affiliations":"","email":"","is_corresponding":false,"name":"Haipeng Zeng"}],"award":"","doi":"10.1109/TVCG.2024.3392587","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243392587","image_caption":"MARLens provides an in-depth analysis of reinforcement-learning-based traffic signal control. The Control Panel (A) presents parameters of the model. The Training Distribution (B) provides the distribution of metrics and ranks episodes. The Episode Overview (C) summarizes the traffic conditions and agents' policies at a certain episode. The Episode Detail (D) provides a summary for each agent in an episode, including states, actions and relationships among agents. The Policy Explainer (E) provides explanations between state and action. The Simulation Replay (F) supports the replay of an episode or time step. The Snapshot Log (G) saves the snapshots of the Policy Explainer.","keywords":["Traffic signal control, multi-agent, reinforcement learning, visual analytics"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/vNaxXisbG4Y&t=0h26m35s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243392587/v-tvcg-20243392587_Preview.mp4?token=y-4GRT4qfjHVR1kac6qvxMnL7ooqMxs-3oOePS5YQJs&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full8","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Urban Planning, Construction, and Disaster Management","session_uid":"v-tvcg","session_youtube_ff_id":"vGdbrKKW2V8","session_youtube_ff_link":"https://youtu.be/vGdbrKKW2V8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/vNaxXisbG4Y&t=0h26m35s","sessions":["Urban Planning, Construction, and Disaster Management"],"time_stamp":"2024-10-16T14:39:00Z","title":"MARLens: Understanding Multi-agent Reinforcement Learning for Traffic Signal Control via Visual Analytics","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1032","abstract":"Dynamic topic modeling is useful at discovering the development and change in latent topics over time. However, present methodology relies on algorithms that separate document and word representations. This prevents the creation of a meaningful embedding space where changes in word usage and documents can be directly analyzed in a temporal context. This paper proposes an expansion of the compass-aligned temporal Word2Vec methodology into dynamic topic modeling. Such a method allows for the direct comparison of word and document embeddings across time in dynamic topics. 
This enables the creation of visualizations that incorporate temporal word embeddings within the context of documents into topic visualizations. In experiments against the current state-of-the-art, our proposed method demonstrates overall competitive performance in topic relevancy and diversity across temporal datasets of varying size. Simultaneously, it provides insightful visualizations focused on temporal word embeddings while maintaining the insights provided by global topic evolution, advancing our understanding of how topics evolve over time.","accessible_pdf":false,"authors":[{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"d4n1elp@vt.edu","is_corresponding":true,"name":"Daniel Palamarchuk"},{"affiliations":["Virginia Polytechnic Institute of Technology , Blacksburg, United States"],"email":"lemaraw@vt.edu","is_corresponding":false,"name":"Lemara Williams"},{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"bmayer@cs.vt.edu","is_corresponding":false,"name":"Brian Mayer"},{"affiliations":["Savannah River National Laboratory, Aiken, United States"],"email":"thomas.danielson@srnl.doe.gov","is_corresponding":false,"name":"Thomas Danielson"},{"affiliations":["Tulane University, New Orleans, United States"],"email":"rfaust1@tulane.edu","is_corresponding":false,"name":"Rebecca Faust"},{"affiliations":["Savannah River National Laboratory, Aiken, United States"],"email":"larry.deschaine@srnl.doe.gov","is_corresponding":false,"name":"Larry M Deschaine PhD"},{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"north@vt.edu","is_corresponding":false,"name":"Chris North"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1032","image_caption":"We present the dynamic topic modeling method called Temporal Topic Embeddings with a Compass. The top-right image illustrates how this method effectively generates a plot of term movements within the context of documents and their associated topics. The outer image showcases TimeLink, a tool that compares word vectors in both global and local topic contexts. 
The red boxes correspond to the respective time periods: the time represented in the scatterplot and where that time is represented in the Sankey diagram.","keywords":["High dimensional data, Dynamic topic modeling, Cluster analysis"],"open_access_supplemental_link":"https://github.com/danilka4/ttec","open_access_supplemental_question":"Yes, external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/H85FqQyR25U&t=0h0m56s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1032/v-full-1032_Preview.mp4?token=1legcsYWlMjPEyQIyMzGdETw2sOSGdikDiSu2YZj514&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1032/v-full-1032_Preview.srt?token=qsKLVKCIpveAxu1_viLv_l88bBusuLDQvhSGZpd_8uc&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full9","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Embeddings and Document Spatialization","session_uid":"v-full","session_youtube_ff_id":"49ktTLyplJc","session_youtube_ff_link":"https://youtu.be/49ktTLyplJc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/H85FqQyR25U&t=0h0m56s","sessions":["Embeddings and Document Spatialization"],"time_stamp":"2024-10-17T12:30:00Z","title":"Visualizing Temporal Topic Embeddings with a Compass","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1128","abstract":"Citations allow quickly identifying related research. If multiple publications are selected as seeds, specific suggestions for related literature can be made based on the number of incoming and outgoing citation links to this selection. Interactively adding recommended publications to the selection refines the next suggestion and incrementally builds a relevant collection of publications. Following this approach, the paper presents a search and foraging approach, PUREsuggest, which combines citation-based suggestions with augmented visualizations of the citation network. The focus and novelty of the approach is, first, the transparency of how the rankings are explained visually and, second, that the process can be steered through user-defined keywords, which reflect topics of interests. The system can be used to build new literature collections, to update and assess existing ones, as well as to use the collected literature for identifying relevant experts in the field. We evaluated the recommendation approach through simulated sessions and performed a user study investigating search strategies and usage patterns supported by the interface.","accessible_pdf":true,"authors":[{"affiliations":["University of Bamberg, Bamberg, Germany"],"email":"fabian.beck@uni-bamberg.de","is_corresponding":true,"name":"Fabian Beck"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1128","image_caption":"The figure showcases the PUREsuggest interface, a tool designed for citation-based literature search and visual exploration. The interface includes three main components: a list of currently selected publications, a list of suggested publications based on citation links, and a visualization of the citation network. 
Users can refine searches by adding publications and entering custom keywords to amplify specific research topics, facilitating an interactive and dynamic approach to discovering relevant literature.","keywords":["Scientific literature search, citation network visualization, visual recommender system."],"open_access_supplemental_link":"https://osf.io/94ebr/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.02508","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/H85FqQyR25U&t=0h52m54s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1128/v-full-1128_Preview.mp4?token=xhnURLvMH8Q9yodeDEVJde2vWGBMegY00mSFQlcFmj4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1128/v-full-1128_Preview.srt?token=v7vyN82iJ7g1kfkcvAnJRGj2dKDfBMOaUHZsR00BN3s&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full9","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Embeddings and Document Spatialization","session_uid":"v-full","session_youtube_ff_id":"obWhz2SJuzg","session_youtube_ff_link":"https://youtu.be/obWhz2SJuzg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/H85FqQyR25U&t=0h52m54s","sessions":["Embeddings and Document Spatialization"],"time_stamp":"2024-10-17T13:18:00Z","title":"PUREsuggest: Citation-based Literature Search and Visual Exploration with Keyword-controlled Rankings","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1489","abstract":"Projecting high-dimensional vectors into two dimensions for visualization, known as embedding visualization, facilitates perceptual reasoning and interpretation. Comparing multiple embedding visualizations drives decision-making in many domains, but traditional comparison methods are limited by a reliance on direct point correspondences. This requirement precludes comparisons without point correspondences, such as two different datasets of annotated images, and fails to capture meaningful higher-level relationships among point groups. To address these shortcomings, we propose a general framework for comparing embedding visualizations based on shared class labels rather than individual points. Our approach partitions points into regions corresponding to three key class concepts\u2014confusion, neighborhood, and relative size\u2014to characterize intra- and inter-class relationships. Informed by a preliminary user study, we implemented our framework using perceptual neighborhood graphs to define these regions and introduced metrics to quantify each concept.We demonstrate the generality of our framework with usage scenarios from machine learning and single-cell biology, highlighting our metrics' ability to draw insightful comparisons across label hierarchies. To assess the effectiveness of our approach, we conducted an evaluation study with five machine learning researchers and six single-cell biologists using an interactive and scalable prototype built with Python, JavaScript, and Rust. 
Our metrics enable more structured comparisons through visual guidance and increased participants\u2019 confidence in their findings.","accessible_pdf":true,"authors":[{"affiliations":["Harvard Medical School, Boston, United States"],"email":"trevor_manz@g.harvard.edu","is_corresponding":true,"name":"Trevor Manz"},{"affiliations":["Ozette Technologies, Seattle, United States"],"email":"f.lekschas@gmail.com","is_corresponding":false,"name":"Fritz Lekschas"},{"affiliations":["Ozette Technologies, Seattle, United States"],"email":"palmergreene@gmail.com","is_corresponding":false,"name":"Evan Greene"},{"affiliations":["Ozette Technologies, Seattle, United States"],"email":"greg@ozette.com","is_corresponding":false,"name":"Greg Finak"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"nils@hms.harvard.edu","is_corresponding":false,"name":"Nils Gehlenborg"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1489","image_caption":"Our framework addresses limitations in traditional embedding visualization comparisons by focusing on shared class labels rather than individual point correspondences. We characterize intra- and inter-class relationships through three key concepts: confusion, neighborhood, and relative size. Here, we contrast standard and transformed UMAP projections of biological data, showcasing healthy tissue vs cancer tissue embedding visualizations. Central panes with quantitative color encoding illustrate how our metrics quantify these concepts and guide comparisons exploration. This approach enables structured comparisons of diverse datasets, as demonstrated with machine learning and single-cell biology examples. Our interactive prototype facilitates insightful analysis of high-dimensional data projections, enhancing researchers' interpretation and confidence in their findings. 
","keywords":["visualization, comparison, high-dimensional data, dimensionality reduction, embeddings"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://osf.io/puxnf","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/H85FqQyR25U&t=0h13m34s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1489/v-full-1489_Preview.mp4?token=r1czoxoMzgDBguv0DW_GGZSz9mE6Bydk9Vp-An3d2Is&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1489/v-full-1489_Preview.srt?token=bKAB3CZKUX_E4F02E59-aU0wJpguBNSh2AaQK1cctqA&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full9","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Embeddings and Document Spatialization","session_uid":"v-full","session_youtube_ff_id":"NOQMkUdisUc","session_youtube_ff_link":"https://youtu.be/NOQMkUdisUc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/H85FqQyR25U&t=0h13m34s","sessions":["Embeddings and Document Spatialization"],"time_stamp":"2024-10-17T12:42:00Z","title":"A General Framework for Comparing Embedding Visualizations Across Class-Label Hierarchies","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1603","abstract":"Multi-modal embeddings form the foundation for vision-language models, such as CLIP embeddings, the most widely used text-image embeddings. However, these embeddings are vulnerable to subtle misalignment of cross-modal features, resulting in decreased model performance and diminished generalization. To address this problem, we design ModalChorus, an interactive system for visual probing and alignment of multi-modal embeddings. ModalChorus primarily offers a two-stage process: 1) embedding probing with Modal Fusion Map (MFM), a novel parametric dimensionality reduction method that integrates both metric and nonmetric objectives to enhance modality fusion; and 2) embedding alignment that allows users to interactively articulate intentions for both point-set and set-set alignments. Quantitative and qualitative comparisons for CLIP embeddings with existing dimensionality reduction (e.g., t-SNE and MDS) and data fusion (e.g., data context map) methods demonstrate the advantages of MFM in showcasing cross-modal features over common vision-language datasets. 
Case studies reveal that ModalChorus can facilitate intuitive discovery of misalignment and efficient re-alignment in scenarios ranging from zero-shot classification to cross-modal retrieval and generation.","accessible_pdf":false,"authors":[{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"],"email":"yyebd@connect.ust.hk","is_corresponding":true,"name":"Yilin Ye"},{"affiliations":["The Hong Kong University of Science and Technology(Guangzhou), Guangzhou, China"],"email":"sxiao713@connect.hkust-gz.edu.cn","is_corresponding":false,"name":"Shishi Xiao"},{"affiliations":["the Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"],"email":"xingchen.zeng@outlook.com","is_corresponding":false,"name":"Xingchen Zeng"},{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China","The Hong Kong University of Science and Technology, Hong Kong SAR, China"],"email":"weizeng@hkust-gz.edu.cn","is_corresponding":false,"name":"Wei Zeng"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1603","image_caption":"ModalChorus supports multi-modal embeddings visualization with Modal Fusion Map and interactive alignment.","keywords":["Multi-modal embeddings, dimensionality reduction, data fusion, interactive alignment"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.12315","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/H85FqQyR25U&t=0h28m21s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1603/v-full-1603_Preview.mp4?token=U2SeRph0CmCty6EkL1awqGasK-mUH3IZYpTsKqatksE&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full9","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Embeddings and Document Spatialization","session_uid":"v-full","session_youtube_ff_id":"oJrEG0FkEYw","session_youtube_ff_link":"https://youtu.be/oJrEG0FkEYw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/H85FqQyR25U&t=0h28m21s","sessions":["Embeddings and Document Spatialization"],"time_stamp":"2024-10-17T12:54:00Z","title":"ModalChorus: Visual Probing and Alignment of Multi-modal Embeddings via Modal Fusion Map","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1770","abstract":"The semantic similarity between documents of a text corpus can be visualized using map-like metaphors based on two-dimensional scatterplot layouts. These layouts result from a dimensionality reduction on the document-term matrix or a representation within a latent embedding, including topic models. Thereby, the resulting layout depends on the input data and hyperparameters of the dimensionality reduction and is therefore affected by changes in them. Furthermore, the resulting layout is affected by changes in the input data and hyperparameters of the dimensionality reduction. However, such changes to the layout require additional cognitive efforts from the user. 
In this work, we present a sensitivity study that analyzes the stability of these layouts concerning (1) changes in the text corpora, (2) changes in the hyperparameter, and (3) randomness in the initialization. Our approach has two stages: data measurement and data analysis. First, we derived layouts for the combination of three text corpora and six text embeddings and a grid-search-inspired hyperparameter selection of the dimensionality reductions. Afterward, we quantified the similarity of the layouts through ten metrics, concerning local and global structures and class separation. Second, we analyzed the resulting 42817 tabular data points in a descriptive statistical analysis. From this, we derived guidelines for informed decisions on the layout algorithm and highlight specific hyperparameter settings. We provide our implementation as a Git repository at https://github.com/hpicgs/Topic-Models-and-Dimensionality-Reduction-Sensitivity-Study and results as Zenodo archive at https://doi.org/10.5281/zenodo.12772898.","accessible_pdf":false,"authors":[{"affiliations":["University of Potsdam, Digital Engineering Faculty, Hasso Plattner Institute, Potsdam, Germany"],"email":"daniel.atzberger@hpi.de","is_corresponding":true,"name":"Daniel Atzberger"},{"affiliations":["University of Potsdam, Potsdam, Germany"],"email":"tcech@uni-potsdam.de","is_corresponding":false,"name":"Tim Cech"},{"affiliations":["Hasso Plattner Institute, Faculty of Digital Engineering, University of Potsdam, Potsdam, Germany"],"email":"willy.scheibel@hpi.de","is_corresponding":false,"name":"Willy Scheibel"},{"affiliations":["Hasso Plattner Institute, Faculty of Digital Engineering, University of Potsdam, Potsdam, Germany"],"email":"juergen.doellner@hpi.de","is_corresponding":false,"name":"J\u00fcrgen D\u00f6llner"},{"affiliations":["Utrecht University, Utrecht, Netherlands"],"email":"m.behrisch@uu.nl","is_corresponding":false,"name":"Michael Behrisch"},{"affiliations":["Graz University of Technology, Graz, Austria"],"email":"tobias.schreck@cgv.tugraz.at","is_corresponding":false,"name":"Tobias Schreck"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1770","image_caption":"Exemplary comparison of pairs of scatterplots. To analyze the stability concerning input data, we compare pairs of scatterplots that only differ in the amount of jitter applied to the DTM. To analyze the stability concerning hyperparameters, we compare pairs of scatterplots that differ in one hyperparameter setting with consecutive values. 
To analyze stability concerning randomness, we compare two layouts that only differ in their seeds.","keywords":["Text spatializations, text embeddings, topic modeling, dimensionality reductions, stability, benchmarking"],"open_access_supplemental_link":"https://zenodo.org/records/12772899","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2407.17876","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/H85FqQyR25U&t=0h40m32s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1770/v-full-1770_Preview.mp4?token=tW2cl0IdREAQh7MB2CO5gIe0FL6l_fBX0wOR_x1bJHI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1770/v-full-1770_Preview.srt?token=X3q6M7zoNPgkA5ue2Lj4m8txAjiTyWvmkq0EiKTfQhY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full9","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Embeddings and Document Spatialization","session_uid":"v-full","session_youtube_ff_id":"T3hvGmZlBgw","session_youtube_ff_link":"https://youtu.be/T3hvGmZlBgw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/H85FqQyR25U&t=0h40m32s","sessions":["Embeddings and Document Spatialization"],"time_stamp":"2024-10-17T13:06:00Z","title":"A Large-Scale Sensitivity Analysis on Latent Embeddings and Dimensionality Reductions for Text Spatializations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-tvcg-20243381453","abstract":"Scatterplots provide a visual representation of bivariate data (or 2D embeddings of multivariate data) that allows for effective analyses of data dependencies, clusters, trends, and outliers. Unfortunately, classical scatterplots suffer from scalability issues, since growing data sizes eventually lead to overplotting and visual clutter on a screen with a fixed resolution, which hinders the data analysis process. We propose an algorithm that compensates for irregular sample distributions by a smooth transformation of the scatterplot's visual domain. Our algorithm evaluates the scatterplot's density distribution to compute a regularization mapping based on integral images of the rasterized density function. The mapping preserves the samples' neighborhood relations. Few regularization iterations suffice to achieve a nearly uniform sample distribution that efficiently uses the available screen space. We further propose approaches to visually convey the transformation that was applied to the scatterplot and compare them in a user study. 
We present a novel parallel algorithm for fast GPU-based integral-image computation, which allows for integrating our de-cluttering approach into interactive visual data analysis systems.","accessible_pdf":false,"authors":[{"affiliations":"","email":"","is_corresponding":true,"name":"Hennes Rave"},{"affiliations":"","email":"","is_corresponding":false,"name":"Vladimir Molchanov"},{"affiliations":"","email":"","is_corresponding":false,"name":"Lars Linsen"}],"award":"","doi":"10.1109/TVCG.2024.3381453","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-tvcg-20243381453","image_caption":"UMAP embedding of the MNIST dataset with color-coded classes after four iterations of our algorithm (top left), with grid lines (top right), with density background texture (bottom left), and with contour lines (bottom right). ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://arxiv.org/abs/2408.06513","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/H85FqQyR25U&t=1h5m9s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-tvcg/v-tvcg-20243381453/v-tvcg-20243381453_Preview.mp4?token=NVVkX__ZVN0UirCznvSvnrnTguAsZpcIu8o5MqWIO6Q&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"full9","session_room":"Bayshore I","session_room_id":"bayshore1","session_title":"Embeddings and Document Spatialization","session_uid":"v-tvcg","session_youtube_ff_id":"U4x_-kWR6sw","session_youtube_ff_link":"https://youtu.be/U4x_-kWR6sw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/H85FqQyR25U&t=1h5m9s","sessions":["Embeddings and Document Spatialization"],"time_stamp":"2024-10-17T13:30:00Z","title":"De-cluttering Scatterplots with Integral Images","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1056","abstract":"We present FCNR, a fast compressive neural representation for tens of thousands of visualization images under varying viewpoints and timesteps. The existing NeRVI solution, albeit enjoying a high compression ratio, incurs slow speeds in encoding and decoding. Built on the recent advances in stereo image compression, FCNR assimilates stereo context modules and joint context transfer modules to compress image pairs. Our solution significantly improves encoding and decoding speed while maintaining high reconstruction quality and satisfying compression ratio. To demonstrate its effectiveness, we compare FCNR with state-of-the-art neural compression methods, including E-NeRV, HNeRV, NeRVI, and ECSIC. 
The source code can be found at https://github.com/YunfeiLu0112/FCNR.","accessible_pdf":true,"authors":[{"affiliations":["University of Notre Dame, Notre Dame, United States"],"email":"ylu25@nd.edu","is_corresponding":true,"name":"Yunfei Lu"},{"affiliations":["University of Notre Dame, Notre Dame, United States"],"email":"pgu@nd.edu","is_corresponding":false,"name":"Pengfei Gu"},{"affiliations":["University of Notre Dame, Notre Dame, United States"],"email":"chaoli.wang@nd.edu","is_corresponding":false,"name":"Chaoli Wang"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1056","image_caption":"FCNR is a fast method for compressing a great number of visualization images. It stands out in both encoding and decoding speed, and leads to compressive results while maintains high reconstruction quality using neural representations.","keywords":["Machine Learning Techniques, Image and Video Data"],"open_access_supplemental_link":"https://github.com/YunfeiLu0112/FCNR","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2407.16369","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/5O_zfoG4xOo&t=0h39m22s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1056/v-short-1056_Preview.mp4?token=_udEvR_-XWuGx7m7Uplz74h9eP8X9f7fwPDsTcHXXL8&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short1","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: System design","session_uid":"v-short","session_youtube_ff_id":"dJGQMkPi44U","session_youtube_ff_link":"https://youtu.be/dJGQMkPi44U","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/5O_zfoG4xOo&t=0h39m22s","sessions":["Short Papers: System design"],"time_stamp":"2024-10-16T18:21:00Z","title":"FCNR: Fast Compressive Neural Representation of Visualization Images","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1097","abstract":"Visualization tools now commonly present automated insights highlighting salient data patterns, including correlations, distributions, outliers, and differences, among others. While these insights are valuable for data exploration and chart interpretation, users currently only have a binary choice of accepting or rejecting them, lacking the flexibility to refine the system logic or customize the insight generation process. To address this limitation, we present Groot, a prototype system that allows users to proactively specify and refine automated data insights. The system allows users to directly manipulate chart elements to receive insight recommendations based on their selections. Additionally, Groot provides users with a manual editing interface to customize, reconfigure, or add new insights to individual charts and propagate them to future explorations. We describe a usage scenario to illustrate how these features collectively support insight editing and configuration and discuss opportunities for future work, including incorporating Large Language Models (LLMs), improving semantic data and visualization search, and supporting insight management. 
","accessible_pdf":true,"authors":[{"affiliations":["University of Maryland, College Park, College Park, United States","Tableau Research, Seattle, United States"],"email":"sgathani@cs.umd.edu","is_corresponding":true,"name":"Sneha Gathani"},{"affiliations":["Tableau Research, Seattle, United States"],"email":"amcrisan@uwaterloo.ca","is_corresponding":false,"name":"Anamaria Crisan"},{"affiliations":["Tableau Research, Palo Alto, United States"],"email":"vsetlur@tableau.com","is_corresponding":false,"name":"Vidya Setlur"},{"affiliations":["Tableau Research, Seattle, United States"],"email":"arjun.srinivasan.10@gmail.com","is_corresponding":false,"name":"Arjun Srinivasan"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1097","image_caption":"GROOT allows users to edit and reconfigure automated data insights by (1) selecting marks in charts to get recommendations of new insights based on the selection, (2) reconfiguring default insights by adjusting the template or insight generation thresholds, (3) adding new custom insights by specifying text templates for insights.","keywords":["Automated data insights, insight reconfiguration, natural language templates"],"open_access_supplemental_link":"https://drive.google.com/file/d/1ZTZsN2YbQDdWGiyhVp9SLaE1q7wF6p4r/view?usp=sharing","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/5O_zfoG4xOo&t=0h47m31s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1097/v-short-1097_Preview.mp4?token=H27dKmOsBJYdAxm_VtiymNi6EZJv0ednyRKeUBzdvrQ&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1097/v-short-1097_Preview.srt?token=csXPj69gXJiEfmouLtJnaftwINmIPmGJRGcO3eJ6j34&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short1","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: System design","session_uid":"v-short","session_youtube_ff_id":"pqb9IsoJKWA","session_youtube_ff_link":"https://youtu.be/pqb9IsoJKWA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/5O_zfoG4xOo&t=0h47m31s","sessions":["Short Papers: System design"],"time_stamp":"2024-10-16T18:30:00Z","title":"Groot: A System for Editing and Configuring Automated Data Insights","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1130","abstract":"Visualization, from simple line plots to complex high-dimensional visual analysis systems, has established itself throughout numerous domains to explore, analyze, and evaluate data. Applying such visualizations in the context of simulation science where High-Performance Computing (HPC) produces ever-growing amounts of data that is more complex, potentially multidimensional, and multi-modal, takes up resources and a high level of technological experience often not available to domain experts. In this work, we present DaVE - a curated database of visualization examples, which aims to provide state-of-the-art and advanced visualization methods that arise in the context of HPC applications. 
Based on domain- or data-specific descriptors entered by the user, DaVE provides a list of appropriate visualization techniques, each accompanied by descriptions, examples, references, and resources. Sample code, adaptable container templates, and recipes for easy integration in HPC applications can be downloaded for easy access to high-fidelity visualizations. While the database is currently filled with a limited number of entries based on a broad evaluation of needs and challenges of current HPC users, DaVE is designed to be easily extended by experts from both the visualization and HPC communities.","accessible_pdf":true,"authors":[{"affiliations":["RWTH Aachen University, Aachen, Germany"],"email":"koenen@informatik.rwth-aachen.de","is_corresponding":false,"name":"Jens Koenen"},{"affiliations":["RPTU Kaiserslautern-Landau, Kaiserslautern, Germany"],"email":"m.petersen@rptu.de","is_corresponding":false,"name":"Marvin Petersen"},{"affiliations":["RPTU Kaiserslautern-Landau, Kaiserslautern, Germany"],"email":"garth@rptu.de","is_corresponding":false,"name":"Christoph Garth"},{"affiliations":["RWTH Aachen University, Aachen, Germany"],"email":"gerrits@vis.rwth-aachen.de","is_corresponding":true,"name":"Tim Gerrits"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1130","image_caption":"Through a modern web interface, DaVE provides access to an extensible database of visualization examples that demonstrate advanced and state-of-the-art visualization methods. Each example comes with descriptions, references and containerized code for an easy deployment on various hardware configurations, ranging from laptops to complex HPC systems.","keywords":["Visualization, Curated Database, High-Performance Computing"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.03188","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/5O_zfoG4xOo&t=0h0m39s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1130/v-short-1130_Preview.mp4?token=K6YauQ7vkIi9XG-JJ7A03jGw53b0BhzhLRRnq9T5mlE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1130/v-short-1130_Preview.srt?token=2m7VfFM6D-3xC9d5AH0hh4IeowanC7WC9xkxLSNXe3U&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short1","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: System design","session_uid":"v-short","session_youtube_ff_id":"HNepHn1OyEM","session_youtube_ff_link":"https://youtu.be/HNepHn1OyEM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/5O_zfoG4xOo&t=0h0m39s","sessions":["Short Papers: System design"],"time_stamp":"2024-10-16T17:45:00Z","title":"DaVE - A Curated Database of Visualization Examples","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1146","abstract":"Millions of runners rely on smart watches that display running-related metrics such as pace, heart rate and distance for training and racing\u2014mostly with text and numbers. Although research tells us that visualizations are a good alternative to text on smart watches, we know little about how visualizations can help in realistic running scenarios. 
We conducted a study in which 20 runners completed running-related tasks on an outdoor track using both text and visualizations. Our results show that runners are 1.5 to 8 times faster in completing those tasks with visualizations than with text, prefer visualizations to text, and would use such visualizations while running \u2014 if available on their smart watch.","accessible_pdf":false,"authors":[{"affiliations":["University of Victoria, Victoria, Canada"],"email":"sarinaksj@uvic.ca","is_corresponding":false,"name":"Sarina Kashanj"},{"affiliations":["University of Victoria, Victoira, Canada","Delft University of Technology, Delft, Netherlands"],"email":"xiyao.wang23@gmail.com","is_corresponding":false,"name":"Xiyao Wang"},{"affiliations":["University of Victoria, Victoria, Canada"],"email":"cperin@uvic.ca","is_corresponding":true,"name":"Charles Perin"}],"award":"honorable","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1146","image_caption":"The two Data Page layouts we used to study the effectiveness of visualization for running. The data pages show Elapsed Time (left), Pace (top), Distance (right) and Heart Rate (bottom). Pace, Distance and Heart Rate are represented either with TEXT or with VISUALIZATION. The data page on the left shows Elapsed Time and Heart Rate with TEXT, and Pace and Distance with VISUALIZATION; the data page on the right shows Elapsed Time, Pace and Distance with TEXT, and Heart Rate with VISUALIZATION.","keywords":["Running, Visualization, Smartwatch visualization."],"open_access_supplemental_link":"https://osf.io/q7ha9/?view_only=cd042df71d6a40239ee8472b505facf0","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://osf.io/preprints/osf/2fa56","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/5O_zfoG4xOo&t=0h57m17s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1146/v-short-1146_Preview.mp4?token=NSucXA2-ztbAqr7mjwjaC7HStkykUljMQ-TmRjbaV9w&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short1","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: System design","session_uid":"v-short","session_youtube_ff_id":"QmWZ3rzzz60","session_youtube_ff_link":"https://youtu.be/QmWZ3rzzz60","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/5O_zfoG4xOo&t=0h57m17s","sessions":["Short Papers: System design"],"time_stamp":"2024-10-16T18:39:00Z","title":"Visualizations on Smart Watches while Running: It Actually Helps!","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1159","abstract":"With two studies, we assess how different walking trajectories (straight line, circular, and infinity) and speeds (2 km/h, 4 km/h, and 6 km/h) influence the accuracy and response time of participants reading micro visualizations on a smartwatch. We showed our participants common watch face micro visualizations including date, time, weather information, and four complications showing progress charts of fitness data. 
Our findings suggest that while walking trajectories did not significantly affect reading performance, overall walking activity, especially at high speeds, hurt reading accuracy and, to some extent, response time.","accessible_pdf":false,"authors":[{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"fairouz.grioui@vis.uni-stuttgart.de","is_corresponding":true,"name":"Fairouz Grioui"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"research@blascheck.eu","is_corresponding":false,"name":"Tanja Blascheck"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"yaolijie0219@gmail.com","is_corresponding":false,"name":"Lijie Yao"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"petra.isenberg@inria.fr","is_corresponding":false,"name":"Petra Isenberg"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1159","image_caption":"The watch-face stimulus on top of the teaser image shows an example of the three radial charts of fitness data: calories burned, step count, and distance walked, that we asked participants to compare and estimate the percentage of progress. Below, the figure shows three illustrations of the three walking trajectories: Line, Circular, and Infinity-like and the three walking speeds: 2km/h, 4km/h, and 6km/h that participants performed while reading the visualizations on a smartwatch.","keywords":["micro and mobile visualization, smartwatch"],"open_access_supplemental_link":"https://osf.io/u78s6/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2407.17893","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/5O_zfoG4xOo&t=1h5m31s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1159/v-short-1159_Preview.mp4?token=aJJ1dnPeLmz0Y3QeplQHI53PXlaUJBukFSskvIyX1iQ&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short1","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: System design","session_uid":"v-short","session_youtube_ff_id":"2DE5LfUsIWA","session_youtube_ff_link":"https://youtu.be/2DE5LfUsIWA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/5O_zfoG4xOo&t=1h5m31s","sessions":["Short Papers: System design"],"time_stamp":"2024-10-16T18:48:00Z","title":"Micro Visualizations on a Smartwatch: Assessing Reading Performance While Walking","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1161","abstract":"Digital twins are an excellent tool to model, visualize, and simulate complex systems, to understand and optimize their operation. In this work, we present the technical challenges of real-time visualization of a digital twin of the Frontier supercomputer. We show the initial prototype and current state of the twin and highlight technical design challenges of visualizing such a large High Performance Computing (HPC) system. The goal is to understand the use of augmented reality as a primary way to extract information and collaborate on digital twins of complex systems. 
This leverages the spatio-temporal aspect of a 3D representation of a digital twin, with the ability to view historical and real-time telemetry, triggering simulations of a system state and viewing the results, which can be augmented via dashboards for details. Finally, we discuss considerations and opportunities for augmented reality of digital twins of large-scale, parallel computers.","accessible_pdf":false,"authors":[{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"maiterthm@ornl.gov","is_corresponding":true,"name":"Matthias Maiterth"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"brewerwh@ornl.gov","is_corresponding":false,"name":"Wes Brewer"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"dewetd@ornl.gov","is_corresponding":false,"name":"Dane De Wet"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"greenwoodms@ornl.gov","is_corresponding":false,"name":"Scott Greenwood"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"kumarv@ornl.gov","is_corresponding":false,"name":"Vineet Kumar"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"hinesjr@ornl.gov","is_corresponding":false,"name":"Jesse Hines"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"bouknightsl@ornl.gov","is_corresponding":false,"name":"Sedrick L Bouknight"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"wangz@ornl.gov","is_corresponding":false,"name":"Zhe Wang"},{"affiliations":["Hewlett Packard Enterprise, Berkshire, United Kingdom"],"email":"tim.dykes@hpe.com","is_corresponding":false,"name":"Tim Dykes"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"fwang2@ornl.gov","is_corresponding":false,"name":"Feiyi Wang"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1161","image_caption":"Two people standing around a desk, pointing at an augmented reality digital twin of the frontier supercomputer with central energy plant. 
","keywords":["Digital Twin, Data Center, Information Representation, Massively Parallel Systems, Operational Data Analytics, Simulation, Augmented Reality"],"open_access_supplemental_link":"https://code.ornl.gov/exadigit/exadigitue5","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/5O_zfoG4xOo&t=0h19m41s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1161/v-short-1161_Preview.mp4?token=ET8CQWbPG-hXwXjUua6NoE0PVGz1MjAU-g1pHkZ33IA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1161/v-short-1161_Preview.srt?token=3zqzNeSMX9qgzyGGV40rq8NrGlal2SYk044O1_DS1fg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short1","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: System design","session_uid":"v-short","session_youtube_ff_id":"bumoRDi4LsE","session_youtube_ff_link":"https://youtu.be/bumoRDi4LsE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/5O_zfoG4xOo&t=0h19m41s","sessions":["Short Papers: System design"],"time_stamp":"2024-10-16T18:03:00Z","title":"Visualizing an Exascale Data Center Digital Twin: Considerations, Challenges and Opportunities","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1166","abstract":"Custom animated visualizations of large, complex datasets are helpful across many domains, but they are hard to develop. Much of the difficulty arises from maintaining visualization state across many animated graphical elements that may change in number over time. We contribute Counterpoint, a framework for state management designed to help implement such visualizations in JavaScript. Using Counterpoint, developers can manipulate large collections of marks with reactive attributes that are easy to render in scalable APIs such as Canvas and WebGL. Counterpoint also helps orchestrate the entry and exit of graphical elements using the concept of a rendering \"stage.\" Through a performance evaluation, we show that Counterpoint adds minimal overhead over current high-performance rendering techniques while simplifying implementation. We provide two examples of visualizations created using Counterpoint that illustrate its flexibility and compatibility with other visualization toolkits as well as considerations for users with disabilities. 
Counterpoint is open-source and available at https://github.com/cmudig/counterpoint.","accessible_pdf":true,"authors":[{"affiliations":["Carnegie Mellon University, Pittsburgh, United States"],"email":"vsivaram@andrew.cmu.edu","is_corresponding":true,"name":"Venkatesh Sivaraman"},{"affiliations":["Carnegie Mellon University, Pittsburgh, United States"],"email":"fje@cmu.edu","is_corresponding":false,"name":"Frank Elavsky"},{"affiliations":["Carnegie Mellon University, Pittsburgh, United States"],"email":"domoritz@cmu.edu","is_corresponding":false,"name":"Dominik Moritz"},{"affiliations":["Carnegie Mellon University, Pittsburgh, United States"],"email":"adamperer@cmu.edu","is_corresponding":false,"name":"Adam Perer"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1166","image_caption":"Counterpoint is an open-source TypeScript framework that makes it easier to create animated visualizations, such as the ones shown here, using high-performance Web graphics frameworks like Canvas and WebGL.","keywords":["Visualization Toolkits, Animation, Web Interfaces, Software System Structures"],"open_access_supplemental_link":"https://dig.cmu.edu/counterpoint","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/5O_zfoG4xOo&t=0h10m3s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1166/v-short-1166_Preview.mp4?token=HGqEhRrBoE95UDds4tEq-HzpElLW8IjdT04FWh_y_-w&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1166/v-short-1166_Preview.srt?token=dkB9rLeqmfOuUxrGJwnjXMnK5w6U7Oo_UebdTcbLPfI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short1","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: System design","session_uid":"v-short","session_youtube_ff_id":"4zOVLaUf7po","session_youtube_ff_link":"https://youtu.be/4zOVLaUf7po","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/5O_zfoG4xOo&t=0h10m3s","sessions":["Short Papers: System design"],"time_stamp":"2024-10-16T17:54:00Z","title":"Counterpoint: Orchestrating Large-Scale Custom Animated Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1248","abstract":"Statistical practices such as building regression models or running hypothesis tests rely on following rigorous procedures of steps and verifying assumptions on data to produce valid results. However, common statistical tools do not verify users\u2019 decision choices and provide low-level statistical functions without instructions on the whole analysis practice. Users can easily misuse analysis methods, potentially decreasing the validity of results. To address this problem, we introduce GuidedStats, an interactive interface within computational notebooks that encapsulates guidance, models, visualization, and exportable results into interactive workflows. It breaks down typical analysis processes, such as linear regression and two-sample T-tests, into interactive steps supplemented with automatic visualizations and explanations for step-wise evaluation. 
Users can iterate on input choices to refine their models, while recommended actions and exports allow the user to continue their analysis in code. Case studies show how GuidedStats offers valuable instructions for conducting fluid statistical analyses while finding possible assumption violations in the underlying data, supporting flexible and accurate statistical analyses.","accessible_pdf":true,"authors":[{"affiliations":["New York University, New York, United States"],"email":"yz9381@nyu.edu","is_corresponding":true,"name":"Yuqi Zhang"},{"affiliations":["Carnegie Mellon University, Pittsburgh, United States"],"email":"adamperer@cmu.edu","is_corresponding":false,"name":"Adam Perer"},{"affiliations":["Carnegie Mellon University, Pittsburgh, United States"],"email":"willepp@cmu.edu","is_corresponding":false,"name":"Will Epperson"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1248","image_caption":"GuidedStats assists users with statistical analyses through guided workflows. It automatically verifies assumptions and provides actionable suggestions. At the current step, the user is checking assumptions, with the explanation offering more details about the relevant statistical concepts.","keywords":["Data science tools, computational notebooks, analytical guidance"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/5O_zfoG4xOo&t=0h29m27s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1248/v-short-1248_Preview.mp4?token=DOEPbtaqFuDv1a5br6vnEiqyvBoppG_qx-z7anTi-Xc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1248/v-short-1248_Preview.srt?token=j0uSYby6HvG32XaP1PEpcjXJYshNz4jbHNAS94B7eZY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short1","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: System design","session_uid":"v-short","session_youtube_ff_id":"kEa12neWJfQ","session_youtube_ff_link":"https://youtu.be/kEa12neWJfQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/5O_zfoG4xOo&t=0h29m27s","sessions":["Short Papers: System design"],"time_stamp":"2024-10-16T18:12:00Z","title":"Guided Statistical Workflows with Interactive Explanations and Assumption Checking","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1040","abstract":"From dirty data to intentional deception, there are many threats to the validity of data-driven decisions. Making use of data, especially new or unfamiliar data, therefore requires a degree of trust or verification. How is this trust established? In this paper, we present the results of a series of interviews with both producers and consumers of data artifacts (outputs of data ecosystems like spreadsheets, charts, and dashboards) aimed at understanding strategies and obstacles to building trust in data. We find a recurring need, but lack of existing standards, for data validation and verification, especially among data consumers. 
We therefore propose a set of data guards: methods and tools for fostering trust in data artifacts.","accessible_pdf":true,"authors":[{"affiliations":["Tableau Research, Seattle, United States"],"email":"nicole.sultanum@gmail.com","is_corresponding":false,"name":"Nicole Sultanum"},{"affiliations":["Tableau Research, Seattle, United States"],"email":"bromley.denny@gmail.com","is_corresponding":false,"name":"Dennis Bromley"},{"affiliations":["Northeastern University, Portland, United States"],"email":"m.correll@northeastern.edu","is_corresponding":false,"name":"Michael Correll"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1040","image_caption":"Data-driven decision making is ostensibly more common now than ever, but without specific points of trust in the data handling process, people often fall back on ad hoc decision justification mechanisms. Driven by user interviews of both data producers and data consumers, Data Guards is a set of seven proposed strategies for improving users' trust in data to help them make more confident data-driven decisions.","keywords":["Data visualization, data cleaning, data quality, trust"],"open_access_supplemental_link":"https://osf.io/ynm57/?view_only=572a886b5b154c8298c8b66ba170c632","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2407.14042","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/O3wdZMpMs-I&t=0h10m45s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1040/v-short-1040_Preview.mp4?token=I0DHvU5-20n2Fp6mZnDJ99636Y0rfqpqfaHfF2OYrcE&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Analytics and Applications","session_uid":"v-short","session_youtube_ff_id":"lGC-JrOjFTo","session_youtube_ff_link":"https://youtu.be/lGC-JrOjFTo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/O3wdZMpMs-I&t=0h10m45s","sessions":["Short Papers: Analytics and Applications"],"time_stamp":"2024-10-17T16:09:00Z","title":"Data Guards: Challenges and Solutions for Fostering Trust in Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1114","abstract":"As visualization literacy and its implications gain prominence, we need effective methods to prepare students for the variety of visualizations in an increasingly data-driven world. Recently, the potential of comics has been recognized in various data visualization contexts, including educational settings. We describe the development of a workshop in which we use our ``comic construction kit'' as a tool for students to understand various data visualization techniques through an interactive creative approach of creating explanatory comics. We report on our insights from holding eight workshops with high school students and teachers, university students, and lecturers, aiming to enhance the landscape of hands-on visualization activities that can enrich the visualization classroom. 
The comic construction kit and all supplemental materials are open source under a CC-BY license and available at https://fhstp.github.io/comixplain/vis4schools.html.","accessible_pdf":true,"authors":[{"affiliations":["St. P\u00f6lten University of Applied Sciences, St. P\u00f6lten, Austria"],"email":"magdalena.boucher@fhstp.ac.at","is_corresponding":true,"name":"Magdalena Boucher"},{"affiliations":["St. Poelten University of Applied Sciences, St. Poelten, Austria"],"email":"christina.stoiber@fhstp.ac.at","is_corresponding":false,"name":"Christina Stoiber"},{"affiliations":["School of Informatics, Communications and Media, Hagenberg im M\u00fchlkreis, Austria"],"email":"mandy.keck@fh-hagenberg.at","is_corresponding":false,"name":"Mandy Keck"},{"affiliations":["St. Poelten University of Applied Sciences, St. Poelten, Austria"],"email":"victor.oliveira@fhstp.ac.at","is_corresponding":false,"name":"Victor Adriel de Jesus Oliveira"},{"affiliations":["St. Poelten University of Applied Sciences, St. Poelten, Austria"],"email":"wolfgang.aigner@fhstp.ac.at","is_corresponding":false,"name":"Wolfgang Aigner"}],"award":"honorable","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1114","image_caption":"A preview of some customizeable character stickers and pre-printed visualizations from our comic construction kit, with a comic example by a student.","keywords":["data comics, storytelling, visualization education, visualization literacy, visualization activities"],"open_access_supplemental_link":"https://fhstp.github.io/comixplain/vis4schools.html","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://phaidra.fhstp.ac.at/o:5588","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/O3wdZMpMs-I&t=1h7m6s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Analytics and Applications","session_uid":"v-short","session_youtube_ff_id":"CopQJYd6mh0","session_youtube_ff_link":"https://youtu.be/CopQJYd6mh0","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/O3wdZMpMs-I&t=1h7m6s","sessions":["Short Papers: Analytics and Applications"],"time_stamp":"2024-10-17T17:03:00Z","title":"The Comic Construction Kit: An Activity for Students to Learn and Explain Data Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1117","abstract":"Geovisualizations are powerful tools for exploratory spatial analysis, enabling sighted users to discern patterns, trends, and relationships within geographic data. However, these visual tools have remained largely inaccessible to screen-reader users. We introduce AltGeoViz, a new interactive geovisualization approach that dynamically generates alt-text descriptions based on the user's current map view, providing voiceover summaries of spatial patterns and descriptive statistics.In a remote user study with five screen-reader users, we found that participants were able to interact with spatial data in previously infeasible ways, demonstrated a clear understanding of data summaries and their location context, and could synthesize spatial understandings of their explorations. 
Moreover, we identified key areas for improvement, such as the addition of spatial navigation controls and comparative analysis features.","accessible_pdf":true,"authors":[{"affiliations":["University of Washington, Seattle, United States"],"email":"chuchuli@cs.washington.edu","is_corresponding":true,"name":"Chu Li"},{"affiliations":["University of Washington, Seattle, United States"],"email":"ypang2@cs.washington.edu","is_corresponding":false,"name":"Rock Yuren Pang"},{"affiliations":["University of Washington, Seattle, United States"],"email":"asharif@cs.washington.edu","is_corresponding":false,"name":"Ather Sharif"},{"affiliations":["University of Washington, Seattle, United States"],"email":"chheda@cs.washington.edu","is_corresponding":false,"name":"Arnavi Chheda-Kothary"},{"affiliations":["University of Washington, Seattle, United States"],"email":"jheer@uw.edu","is_corresponding":false,"name":"Jeffrey Heer"},{"affiliations":["University of Washington, Seattle, United States"],"email":"jonf@cs.uw.edu","is_corresponding":false,"name":"Jon E. Froehlich"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1117","image_caption":"AltGeoViz enables screen-reader users to interact with dynamic geovisualizations. The left image shows the initial view, with the title, a summary of the general spatial pattern, and data extrema and averages presented to the user.The center image shows how as the user moves and zooms, the information is updated, and they can hear the boundary of their current viewport. The right image demonstrates how the data can be shown at different geographic units, such as state or county level, depending on the zoom level. See the provided video for a full demonstration of the AltGeoViz functionality. 
","keywords":["dynamic geovisualization, accessibility, alt-text, screen-reader"],"open_access_supplemental_link":"https://github.com/makeabilitylab/altgeoviz","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2406.13853","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/O3wdZMpMs-I&t=0h20m0s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1117/v-short-1117_Preview.mp4?token=eBWMER4GnSZFHZaWimXzfNCKGTrVOVyHY6Wo_kRg5yI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1117/v-short-1117_Preview.srt?token=SrKdWP8Up_PgHYSD-3fOBsOlspY3U6SURAVdsDl8oZA&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Analytics and Applications","session_uid":"v-short","session_youtube_ff_id":"S6K-w6Kn090","session_youtube_ff_link":"https://youtu.be/S6K-w6Kn090","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/O3wdZMpMs-I&t=0h20m0s","sessions":["Short Papers: Analytics and Applications"],"time_stamp":"2024-10-17T16:18:00Z","title":"AltGeoViz: Facilitating Accessible Geovisualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1126","abstract":"Psychological research often involves understanding psychological constructs through conducting factor analysis on data collected by a questionnaire, which can comprise hundreds of questions. Without interactive systems for interpreting factor models, researchers are frequently exposed to subjectivity, potentially leading to misinterpretations or overlooked crucial information. This paper introduces FAVis, a novel interactive visualization tool designed to aid researchers in interpreting and evaluating factor analysis results. FAVis enhances the understanding of relationships between variables and factors by supporting multiple views for visualizing factor loadings and correlations, allowing users to analyze information from various perspectives. The primary feature of FAVis is to enable users to set optimal thresholds for factor loadings to balance clarity and information retention. FAVis also allows users to assign tags to variables, enhancing the understanding of factors by linking them to their associated psychological constructs. Our user study demonstrates the utility of FAVis in various tasks.","accessible_pdf":true,"authors":[{"affiliations":["University of Notre Dame, Notre Dame, United States","University of Notre Dame, Notre Dame, United States"],"email":"ylu22@nd.edu","is_corresponding":true,"name":"Yikai Lu"},{"affiliations":["University of Notre Dame, Notre Dame, United States"],"email":"chaoli.wang@nd.edu","is_corresponding":false,"name":"Chaoli Wang"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1126","image_caption":"We propose FAVis (https://luyikei.github.io/favis/). 
(A) Matrix view shows a factor loadings matrix; (B) Network view visualizes cross-loadings most effectively; (C) Parallel-coordinates view shows factor loadings for each variable/factor allows for selecting variables/factors within a range; (D) Tag view shows the relevance of tags for each factor by counting tags annotated for variables based on a theory; (E) Word cloud view helps interpret factors by correlating fonts with the values of factor loadings; (F) Threshold view controls the number of factor loadings shown in different views; (G) Factor correlation view shows the network of factor correlations; (H) Top bar for filtering.","keywords":["Machine Learning, Statistics, Modelling, and Simulation Applications, Coordinated and Multiple Views, High-dimensional Data"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2407.14072","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/O3wdZMpMs-I&t=0h1m0s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1126/v-short-1126_Preview.mp4?token=Py6oaIPs8w8gzgtsu2sfDDyy4zLBsY5bFKVNS7lGboM&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1126/v-short-1126_Preview.srt?token=MVgNa4kGGix3ZpPG_yBLmTLC9Y_QweqCRI1O2NW2RNc&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Analytics and Applications","session_uid":"v-short","session_youtube_ff_id":"USpKXLjKe1A","session_youtube_ff_link":"https://youtu.be/USpKXLjKe1A","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/O3wdZMpMs-I&t=0h1m0s","sessions":["Short Papers: Analytics and Applications"],"time_stamp":"2024-10-17T16:00:00Z","title":"FAVis: Visual Analytics of Factor Analysis for Psychological Research","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1185","abstract":"The visualization and interactive exploration of geo-referenced networks poses challenges if the network's nodes are not evenly distributed. Our approach proposes new ways of realizing animated transitions for exploring such networks from an ego-perspective. We aim to reduce the required screen estate while maintaining the viewers' mental map of distances and directions. A preliminary study provides first insights of the comprehensiveness of animated geographic transitions regarding directional relationships between start and end point in different projections. 
Two use cases showcase how ego-perspective graph exploration can be supported using less screen space than previous approaches.","accessible_pdf":false,"authors":[{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"max@mumintroll.org","is_corresponding":true,"name":"Max Franke"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"samuel.beck@vis.uni-stuttgart.de","is_corresponding":false,"name":"Samuel Beck"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"steffen.koch@vis.uni-stuttgart.de","is_corresponding":false,"name":"Steffen Koch"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1185","image_caption":"Our approach supports the exploration of relations in geo-referenced networks with animated zoom-and-pan transitions. The figure shows such a transition realized as a two-point equidistant projection. The geodetic line (blue arrow) between the start and end node is projected without distortion. Example views during the animated transition are shown to the left and right of the map. Their respective coverage is indicated by red circles.","keywords":["Geographical projection, geo-referenced graph, degree-of-interest function, ego-perspective exploration."],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/O3wdZMpMs-I&t=0h48m48s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1185/v-short-1185_Preview.mp4?token=nAu7NU25mRuUoQFmO5uEegIKXfgjvuDrFomvfMeej-g&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1185/v-short-1185_Preview.srt?token=Gc4t4Ptp3CFSPRpeiFS6pC0a-gqH_JjwfT0S8OlSI8s&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Analytics and Applications","session_uid":"v-short","session_youtube_ff_id":"dn3WTXLOdUE","session_youtube_ff_link":"https://youtu.be/dn3WTXLOdUE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/O3wdZMpMs-I&t=0h48m48s","sessions":["Short Papers: Analytics and Applications"],"time_stamp":"2024-10-17T16:45:00Z","title":"Two-point Equidistant Projection and Degree-of-interest Filtering for Smooth Exploration of Geo-referenced Networks","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1191","abstract":"To enable data-driven decision-making across organizations, data professionals need to share insights with their colleagues in context-appropriate communication channels. Many of their colleagues rely on data but are not themselves analysts; furthermore, their colleagues are reluctant or unable to use dedicated analytical applications or dashboards, and they expect communication to take place within threaded collaboration platforms such as Slack or Microsoft Teams. In this paper, we introduce a set of six strategies for adapting content from business intelligence (BI) dashboards into appropriate formats for sharing on collaboration platforms, formats that we refer to as dashboard snapshots. 
Informed by prior studies of enterprise communication around data, these strategies go beyond redesigning or restyling by considering varying levels of data literacy across an organization, introducing affordances for self-service question-answering, and anticipating the post-sharing lifecycle of data artifacts. These strategies involve the use of templates that are matched to common communicative intents, serving to reduce the workload of data professionals. We contribute a formal representation of these strategies and demonstrate their applicability in a comprehensive enterprise communication scenario featuring multiple stakeholders that unfolds over the span of months. ","accessible_pdf":true,"authors":[{"affiliations":["Northwestern University, Evanston, United States"],"email":"hyeokkim2024@u.northwestern.edu","is_corresponding":true,"name":"Hyeok Kim"},{"affiliations":["Tableau Research, Seattle, United States"],"email":"arjun.srinivasan.10@gmail.com","is_corresponding":false,"name":"Arjun Srinivasan"},{"affiliations":["Tableau Research, Seattle, United States"],"email":"mbrehmer@uwaterloo.ca","is_corresponding":false,"name":"Matthew Brehmer"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1191","image_caption":"A pipeline for making selections from a dashboard, retargeting them as components, combining the components into a dashboard snapshot, sharing and updating the snapshot on a collaboration platform.","keywords":["Collaboration visualization, visualization retargeting, responsive visualization design, business intelligence"],"open_access_supplemental_link":"https://dashboard-snapshot.github.io","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.00242","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/O3wdZMpMs-I&t=0h58m35s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1191/v-short-1191_Preview.mp4?token=hdtHw2ttp4zyinx0e-hcDqPYjbBGrd__GppnixInt8Q&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1191/v-short-1191_Preview.srt?token=Hi010g9WmvNn2xupgck3Jp3ivcfflWCT5Puv7-WviZY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Analytics and Applications","session_uid":"v-short","session_youtube_ff_id":"SLBqiNRU_NY","session_youtube_ff_link":"https://youtu.be/SLBqiNRU_NY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/O3wdZMpMs-I&t=0h58m35s","sessions":["Short Papers: Analytics and Applications"],"time_stamp":"2024-10-17T16:54:00Z","title":"Bringing Data into the Conversation: Adapting Content from Business Intelligence Dashboards for Threaded Collaboration Platforms","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1264","abstract":"The Local Moran's I statistic is a valuable tool for identifying localized patterns of spatial autocorrelation. Understanding these patterns is crucial in spatial analysis, but interpreting the statistic can be difficult. To simplify this process, we introduce three novel visualizations that enhance the interpretation of Local Moran's I results. 
These visualizations can be interactively linked to one another, and to established visualizations, to offer a more holistic exploration of the results. We provide a JavaScript library with implementations of these new visual elements, along with a web dashboard that demonstrates their integrated use. ","accessible_pdf":false,"authors":[{"affiliations":["NIH, Rockville, United States","Queen's University, Belfast, United Kingdom"],"email":"masonlk@nih.gov","is_corresponding":true,"name":"Lee Mason"},{"affiliations":["Queen's University Belfast , Belfast , United Kingdom"],"email":"b.hicks@qub.ac.uk","is_corresponding":false,"name":"Bl\u00e1naid Hicks"},{"affiliations":["National Institutes of Health, Rockville, United States"],"email":"jonas.dealmeida@nih.gov","is_corresponding":false,"name":"Jonas S Almeida"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1264","image_caption":"A screenshot of an interactive dashboard featuring the three Local Moran's I plot designs proposed in our paper.","keywords":["Spatial, spatial clustering, spatial autocorrelation, geospatial, GIS, interactive visualization, visual analytics, Moran's I, local indicators of spatial association"],"open_access_supplemental_link":"https://github.com/episphere/moranplot","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.02418","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/O3wdZMpMs-I&t=0h39m6s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1264/v-short-1264_Preview.mp4?token=bM9IpDfGMhh8fPX0WeTL_qJUJthGTqfQxwl-D30S9RM&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1264/v-short-1264_Preview.srt?token=h1d3DNeWPE2VfjyPIc-kozlja0tfD_Yik8VdjSr892g&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Analytics and Applications","session_uid":"v-short","session_youtube_ff_id":"E1nVUBZigfY","session_youtube_ff_link":"https://youtu.be/E1nVUBZigfY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/O3wdZMpMs-I&t=0h39m6s","sessions":["Short Papers: Analytics and Applications"],"time_stamp":"2024-10-17T16:36:00Z","title":"Demystifying Spatial Dependence: Interactive Visualizations for Interpreting Local Spatial Autocorrelation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1285","abstract":"This study examines the impacts of public health communications visualizing risk disparities between racial and other social groups. It compares the effects of traditional bar charts to an alternative design emphasizing geographic variability with differing annotations and jitter plots. Whereas both visualization designs increased perceived vulnerability, behavioral intent, and policy support, the geo-emphasized charts were significantly more effective in reducing personal attribution biases. The findings also reveal emotionally taxing experiences for chart viewers from marginalized communities. 
This work suggests a need for strategic reevaluation of visual communication tools in public health to enhance understanding and engagement without reinforcing stereotypes or emotional distress.","accessible_pdf":false,"authors":[{"affiliations":["3iap, Raleigh, United States"],"email":"eli@3iap.com","is_corresponding":true,"name":"Eli Holder"},{"affiliations":["Northeastern University, Boston, United States","University of California Merced, Merced, United States"],"email":"l.padilla@northeastern.edu","is_corresponding":false,"name":"Lace M. Padilla"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1285","image_caption":"Bars and geography-emphasized chart (geo-emph) showing crude mortality rates for heart disease. The geo-emph chart includes the same overall mortality rates but uses annotations and jitter dots of U.S. states to emphasize within-group differences.","keywords":["Health Equity, Public Health Communication"],"open_access_supplemental_link":"https://osf.io/emb8y/","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/O3wdZMpMs-I&t=0h30m20s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1285/v-short-1285_Preview.mp4?token=UMeO7m2MzfNLyPgPhuBS9ecNtcw7b_GxGKH3rMU7wNE&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Analytics and Applications","session_uid":"v-short","session_youtube_ff_id":"tUzqEEJJyKw","session_youtube_ff_link":"https://youtu.be/tUzqEEJJyKw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/O3wdZMpMs-I&t=0h30m20s","sessions":["Short Papers: Analytics and Applications"],"time_stamp":"2024-10-17T16:27:00Z","title":"\"Must Be a Tuesday\": Affect, Attribution, and Geographic Variability in Equity-Oriented Visualizations of Population Health Disparities","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1047","abstract":"In the rapidly evolving field of deep learning, traditional methodologies for designing models predominantly rely on code-based frameworks. While these approaches provide flexibility, they create a significant barrier to entry for non-experts and obscure the immediate impact of architectural decisions on model performance. In response to this challenge, recent no-code approaches have been developed with the aim of enabling easy model development through graphical interfaces. However, both traditional and no-code methodologies share a common limitation that the inability to predict model outcomes or identify issues without executing the model. To address this limitation, we introduce an intuitive visual feedback-based no-code approach to visualize and analyze deep learning models during the design phase. This approach utilizes dataflow-based visual programming with dynamic visual encoding of model architecture. A user study was conducted with deep learning developers to demonstrate the effectiveness of our approach in enhancing the model design process, improving model understanding, and facilitating a more intuitive development experience. 
The findings of this study suggest that real-time architectural visualization significantly contributes to more efficient model development and a deeper understanding of model behaviors.","accessible_pdf":true,"authors":[{"affiliations":["VIENCE Inc., Seoul, Korea, Republic of","Korea University, Seoul, Korea, Republic of"],"email":"juny0603@gmail.com","is_corresponding":true,"name":"JunYoung Choi"},{"affiliations":["VIENCE Inc., Seoul, Korea, Republic of"],"email":"wings159@vience.co.kr","is_corresponding":false,"name":"Sohee Park"},{"affiliations":["Korea University, Seoul, Korea, Republic of"],"email":"hellenkoh@gmail.com","is_corresponding":false,"name":"GaYeon Koh"},{"affiliations":["VIENCE Inc., Seoul, Korea, Republic of"],"email":"k0seo0330@vience.co.kr","is_corresponding":false,"name":"Youngseo Kim"},{"affiliations":["VIENCE Inc., Seoul, Korea, Republic of","Korea University, Seoul, Korea, Republic of"],"email":"wkjeong@korea.ac.kr","is_corresponding":false,"name":"Won-Ki Jeong"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1047","image_caption":"An example of proofreading of structural issues in a deep learning model (U-Net) using a proposed visual feedback-based no-code approach, and an example of the conventional method (code-based) corresponding to the errors present in the model.","keywords":["Deep learning, visual programming, explainable AI."],"open_access_supplemental_link":"https://vience.io/vience-canvas/mlops/sample","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/L9rtmx-1eyM&t=0h39m55s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1047/v-short-1047_Preview.mp4?token=CMcl7ui0rQ393UyFOML-kB2MqUGiK4UTVazthQ6Lrtw&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1047/v-short-1047_Preview.srt?token=i2_eLL9wzYoVKRhFXnyAoQv3qoo_n0tZ5v9c3rqE-zk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: AI and LLM","session_uid":"v-short","session_youtube_ff_id":"79um-yl_rvU","session_youtube_ff_link":"https://youtu.be/79um-yl_rvU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/L9rtmx-1eyM&t=0h39m55s","sessions":["Short Papers: AI and LLM"],"time_stamp":"2024-10-17T18:21:00Z","title":"Intuitive Design of Deep Learning Models through Visual Feedback","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1058","abstract":"Semantic interaction (SI) in Dimension Reduction (DR) of images allows users to incorporate feedback through direct manipulation of the 2D positions of images. Through interaction, users specify a set of pairwise relationships that the DR should aim to capture. Existing methods for images incorporate feedback into the DR through feature weights on abstract embedding features. However, if the original embedding features do not suitably capture the users\u2019 task then the DR cannot either. We propose ImageSI, an SI method for image DR that incorporates user feedback directly into the image model to update the underlying embeddings, rather than weighting them. 
In doing so, ImageSI ensures that the embeddings suitably capture the features necessary for the task so that the DR can subsequently organize images using those features. We present two variations of ImageSI using different loss functions - ImageSI_MDS\u22121 , which prioritizes the explicit pairwise relationships from the interaction and ImageSI_Triplet, which prioritizes clustering, using the interaction to define groups of images. Finally, we present a usage scenario and a simulation-based evaluation to demonstrate the utility of ImageSI and compare it to current methods.","accessible_pdf":true,"authors":[{"affiliations":["Vriginia Tech, Blacksburg, United States"],"email":"jiayuelin@vt.edu","is_corresponding":false,"name":"Jiayue Lin"},{"affiliations":["Tulane University, New Orleans, United States"],"email":"rfaust1@tulane.edu","is_corresponding":true,"name":"Rebecca Faust"},{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"north@vt.edu","is_corresponding":false,"name":"Chris North"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1058","image_caption":"An example using a collection of images of sharks and snakes. We want the dimension reduction (DR) to organize images based on the feature \"open mouth\" vs \"closed mouth\". (A) shows the initial projection, with added contours to highlight the locations of images with open mouths (yellow) and closed mouths (blue). The DR is not able to identify the open vs closed mouth feature. (B) illustrates the user\u2019s interaction to convey this feature. (C) shows the DR after using ImageSI to update the embeddings. The DR now captures this feature much better than in it did with the original embeddings. ","keywords":["Semantic Interaction, Dimension Reduction"],"open_access_supplemental_link":"https://osf.io/m2wdf/?view_only=3b2f851592874ac791ad0ba5bc809774","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/L9rtmx-1eyM&t=0h0m45s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1058/v-short-1058_Preview.mp4?token=DiJFwOd1DL-Y3YZiGI_igjZZaP9lc9UQl3GxZuDqyok&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1058/v-short-1058_Preview.srt?token=U3bxJhzPWCwU3zWDa-ETnp5-cAETvSqcvLtlycpJ9YA&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: AI and LLM","session_uid":"v-short","session_youtube_ff_id":"hPRtueM5Aw4","session_youtube_ff_link":"https://youtu.be/hPRtueM5Aw4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/L9rtmx-1eyM&t=0h0m45s","sessions":["Short Papers: AI and LLM"],"time_stamp":"2024-10-17T17:45:00Z","title":"ImageSI: Semantic Interaction for Deep Learning Image Projections","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1064","abstract":"Large Language Models (LLMs) have demonstrated remarkable versatility in visualization authoring, but often generate suboptimal designs that are invalid or fail to adhere to design guidelines for effective visualization. 
We present Bavisitter, a natural language interface that integrates established visualization design guidelines into LLMs.Based on our survey on the design issues in LLM-generated visualizations, Bavisitter monitors the generated visualizations during a visualization authoring dialogue to detect an issue. When an issue is detected, it intervenes in the dialogue, suggesting possible solutions to the issue by modifying the prompts. We also demonstrate two use cases where Bavisitter detects and resolves design issues from the actual LLM-generated visualizations.","accessible_pdf":false,"authors":[{"affiliations":["Sungkyunkwan University, Suwon, Korea, Republic of"],"email":"jiwnchoi@skku.edu","is_corresponding":true,"name":"Jiwon Choi"},{"affiliations":["Sungkyunkwan University, Suwon, Korea, Republic of"],"email":"dlwodnd00@skku.edu","is_corresponding":false,"name":"Jaeung Lee"},{"affiliations":["Sungkyunkwan University, Suwon, Korea, Republic of"],"email":"jmjo@skku.edu","is_corresponding":false,"name":"Jaemin Jo"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1064","image_caption":"Bavisitter\u2019s visualization authoring workflow. A) The user requests a visualization to an LLM by prompting \u201cShow me the average yield by site.\u201d B) The LLM generates an ineffective visualization design that uses a connection mark to encode the categorical attribute on the x-axis. C) Bavisitter detects the design issue in the generated visualization and gives feedback to the LLM by modifying the original prompt, e.g., appending \u201cChange mark to bar\u201d. As a result, the user can author visualization designs that conform to known design guidelines and knowledge while exploiting the flexibility that the LLM provides. ","keywords":["Automated Visualization, Visualization Tools, Large Language Model."],"open_access_supplemental_link":"https://github.com/jiwnchoi/Bavisitter","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/L9rtmx-1eyM&t=0h57m33s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1064/v-short-1064_Preview.mp4?token=5RpdXHt32iGGi7RTjNMIT0xavvRbhHTuPaoWv7omPIo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1064/v-short-1064_Preview.srt?token=276KbpWPG1SauiOrcfVnuu8YnxSc5r4PQ1Z5BVO4iL4&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: AI and LLM","session_uid":"v-short","session_youtube_ff_id":"yRnmq_TZ2FU","session_youtube_ff_link":"https://youtu.be/yRnmq_TZ2FU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/L9rtmx-1eyM&t=0h57m33s","sessions":["Short Papers: AI and LLM"],"time_stamp":"2024-10-17T18:39:00Z","title":"Bavisitter: Integrating Design Guidelines into Large Language Models for Visualization Authoring","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1089","abstract":"In healthcare, AI techniques are widely used for tasks like risk assessment and anomaly detection. 
Despite AI's potential as a valuable assistant, its role in complex medical data analysis often oversimplifies human-AI collaboration dynamics. To address this, we collaborated with a local hospital, engaging six physicians and one data scientist in a formative study. From this collaboration, we propose a framework integrating two-phase interactive visualization systems: one for Human-Led, AI-Assisted Retrospective Analysis and another for AI-Mediated, Human-Reviewed Iterative Modeling. This framework aims to enhance understanding and discussion around effective human-AI collaboration in healthcare. ","accessible_pdf":false,"authors":[{"affiliations":["ShanghaiTech University, Shanghai, China","ShanghaiTech University, Shanghai, China"],"email":"ouyy@shanghaitech.edu.cn","is_corresponding":true,"name":"Yang Ouyang"},{"affiliations":["University of Illinois at Urbana-Champaign, Champaign, United States","University of Illinois at Urbana-Champaign, Champaign, United States"],"email":"chenyang.zhang@gatech.edu","is_corresponding":false,"name":"Chenyang Zhang"},{"affiliations":["ShanghaiTech University, Shanghai, China","ShanghaiTech University, Shanghai, China"],"email":"wanghe1@shanghaitech.edu.cn","is_corresponding":false,"name":"He Wang"},{"affiliations":["Zhongshan Hospital Fudan University, Shanghai, China","Zhongshan Hospital Fudan University, Shanghai, China"],"email":"15301050137@fudan.edu.cn","is_corresponding":false,"name":"Tianle Ma"},{"affiliations":["Zhongshan Hospital Fudan University, Shanghai, China","Zhongshan Hospital Fudan University, Shanghai, China"],"email":"cjiang_fdu@yeah.net","is_corresponding":false,"name":"Chang Jiang"},{"affiliations":["Zhongshan Hospital Fudan University, Shanghai, China","Zhongshan Hospital Fudan University, Shanghai, China"],"email":"522649732@qq.com","is_corresponding":false,"name":"Yuheng Yan"},{"affiliations":["Zhongshan Hospital Fudan University, Shanghai, China","Zhongshan Hospital Fudan University, Shanghai, China"],"email":"yan.zuoqin@zs-hospital.sh.cn","is_corresponding":false,"name":"Zuoqin Yan"},{"affiliations":["Hong Kong University of Science and Technology, Hong Kong, Hong Kong","Hong Kong University of Science and Technology, Hong Kong, Hong Kong"],"email":"mxj@cse.ust.hk","is_corresponding":false,"name":"Xiaojuan Ma"},{"affiliations":["Southeast University, Nanjing, China","Southeast University, Nanjing, China"],"email":"cshiag@connect.ust.hk","is_corresponding":false,"name":"Chuhan Shi"},{"affiliations":["ShanghaiTech University, Shanghai, China","ShanghaiTech University, Shanghai, China"],"email":"liquan@shanghaitech.edu.cn","is_corresponding":false,"name":"Quan Li"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1089","image_caption":"System overview: Phase I includes (A) Cohort View for understanding drug event and disease progression relationships, (B) Patient Projection View to explore specific patient cohort characteristics, and (C) Medical Event View for detailed visualization of patient medical events. 
Phase II comprises (D) Modeling View for iterative AI model development and performance evaluation, and (E) Logs View for maintaining iteration records of models and associated data.","keywords":["Role Transfer, Hormone-related Medical Records, Visual Analytics, Machine Learning"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/L9rtmx-1eyM&t=0h19m42s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1089/v-short-1089_Preview.mp4?token=SF79zbkXm7K6OO_TFS20wJPZP4RNJ9eNFhQfShoogXk&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1089/v-short-1089_Preview.srt?token=9Nik6uy2GwUw6bX8pcyInh0NEgvJlDcJfnZ78Jha6Mw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: AI and LLM","session_uid":"v-short","session_youtube_ff_id":"_T6AUyLBmY4","session_youtube_ff_link":"https://youtu.be/_T6AUyLBmY4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/L9rtmx-1eyM&t=0h19m42s","sessions":["Short Papers: AI and LLM"],"time_stamp":"2024-10-17T18:03:00Z","title":"A Two-Phase Visualization System for Continuous Human-AI Collaboration in Sequelae Analysis and Modeling","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1177","abstract":"The proliferation of misleading visualizations online, particularly during critical events like public health crises and elections, poses a significant risk of misinformation. This work investigates the capability of GPT-4 models (4V, 4o, and 4o mini) to detect misleading visualizations. Utilizing a dataset of tweet-visualization pairs with various visual misleaders, we tested these models under four experimental conditions with different levels of guidance. Our results demonstrate that GPT-4 models can detect misleading visualizations with moderate accuracy without prior training (naive zero-shot) and that performance considerably improves by providing the model with the definitions of misleaders (guided zero-shot). Our results indicate that a single prompt engineering technique does not necessarily yield the best results for all types of misleaders. We found that guided few-shot was more effective for reasoning misleaders, while guided zero-shot performed better for design misleaders. 
This study underscores the feasibility of using large vision-language models to combat misinformation and emphasizes the importance of optimizing prompt engineering to enhance detection accuracy.","accessible_pdf":true,"authors":[{"affiliations":["University of Massachusetts Amherst, Amherst, United States"],"email":"jhalexander@umass.edu","is_corresponding":false,"name":"Jason Huang Alexander"},{"affiliations":["University of Masssachusetts Amherst, Amherst, United States"],"email":"phnanda@umass.edu","is_corresponding":false,"name":"Priyal H Nanda"},{"affiliations":["Northeastern University, Boston, United States"],"email":"yangkc@iu.edu","is_corresponding":false,"name":"Kai-Cheng Yang"},{"affiliations":["University of Massachusetts Amherst, Amherst, United States"],"email":"asarv@cs.umass.edu","is_corresponding":false,"name":"Ali Sarvghad"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1177","image_caption":"We evaluated the accuracy of three OpenAI GPT-4 models in detecting misleading visualizations. Our findings suggest that this approach could serve as a valuable complementary method for addressing misleading visualizations.","keywords":["Misleading visualizations, GPT-4, large vision language model, misinformation"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/L9rtmx-1eyM&t=0h29m21s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1177/v-short-1177_Preview.mp4?token=uEnZBJap4zJdsEMot4L6tPC2sUvFO2MLDPr5nGJ0LTc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1177/v-short-1177_Preview.srt?token=eFl3tdQd8M-ElVVmSy-kgMYVuPmMMSimsp_USszqngs&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: AI and LLM","session_uid":"v-short","session_youtube_ff_id":"dUwRzvfPmaI","session_youtube_ff_link":"https://youtu.be/dUwRzvfPmaI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/L9rtmx-1eyM&t=0h29m21s","sessions":["Short Papers: AI and LLM"],"time_stamp":"2024-10-17T18:12:00Z","title":"Can GPT-4 Models Detect Misleading Visualizations?","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1186","abstract":"Data visualizations help extract insights from datasets, but reaching these insights requires decomposing high level goals into low-level analytic tasks that can be complex due to varying degrees of data literacy and visualization experience. Recent advancements in large language models (LLMs) have shown promise for lowering barriers for users to achieve tasks such as writing code and may likewise facilitate visualization insight. Scalable Vector Graphics (SVG), a text-based image format common in data visualizations, matches well with the text sequence processing of transformer-based LLMs. In this paper, we explore the capability of LLMs to perform 10 low-level visual analytic tasks defined by Amar, Eagan, and Stasko directly on SVG-based visualizations. 
Using zero-shot prompts, we instruct the models to provide responses or modify the SVG code based on given visualizations. Our findings demonstrate that LLMs can effectively modify existing SVG visualizations for some tasks like Cluster but perform poorly on tasks requiring mathematical operations like Compute Derived Value. We also discovered that LLM performance can vary based on factors such as the number of data points, the presence of value labels, and the chart type. Our findings contribute to gauging the general capabilities of LLMs and highlight the need for further exploration and development to fully harness their potential in supporting visual analytic tasks.","accessible_pdf":true,"authors":[{"affiliations":["Brown University, Providence, United States"],"email":"leooooxzz@gmail.com","is_corresponding":true,"name":"Zhongzheng Xu"},{"affiliations":["Emory University, Atlanta, United States"],"email":"emily.wall@emory.edu","is_corresponding":false,"name":"Emily Wall"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1186","image_caption":"The image is an illustration of the study design of the paper Exploring the Capability of LLMs in Performing Low-Level Visual Analytic Tasks on SVG Data Visualizations. This figure consists of three main components: Plot Type, Plot Difficulty, and Low-level Visual Analytics Tasks. Plot Types include Scatter, Line, and Bar charts, all in SVG format. Plot Difficulty is divided into Small Labeled, Small Unlabeled, Medium Labeled, and Medium Unlabeled, with 20 sets of each type. Low-level Visual Analytics Tasks include Retrieve Value, Filter, Compute Derived Value, Find Extremum, Sort, Determine Range, Characterize Distribution, Find Anomalies, Cluster, and Correlate. ","keywords":["Data Visualization, Large Language Models (LLM), Visual Analytics Tasks, Support Vector Graphics (SVG)"],"open_access_supplemental_link":"https://github.com/lebretou/SVG_taxonomy","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/pdf/2404.19097","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/L9rtmx-1eyM&t=1h6m29s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1186/v-short-1186_Preview.mp4?token=uWPsx3kUTnpqLdnhBYyj2x3p9kqHL8X3VG8HHeIsDvY&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: AI and LLM","session_uid":"v-short","session_youtube_ff_id":"oEOBdI3DxCk","session_youtube_ff_link":"https://youtu.be/oEOBdI3DxCk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/L9rtmx-1eyM&t=1h6m29s","sessions":["Short Papers: AI and LLM"],"time_stamp":"2024-10-17T18:48:00Z","title":"Exploring the Capability of LLMs in Performing Low-Level Visual Analytic Tasks on SVG Data Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1193","abstract":"We present LinkQ, a system that leverages a large language model (LLM) to facilitate knowledge graph (KG) query construction through natural language question-answering. 
Traditional approaches often require detailed knowledge of a graph querying language, limiting the ability for users - even experts - to acquire valuable insights from KGs. LinkQ simplifies this process by implementing a multistep protocol in which the LLM interprets a user's question, then systematically converts it into a well-formed query. LinkQ helps users iteratively refine any open-ended questions into precise ones, supporting both targeted and exploratory analysis. Further, LinkQ guards against the LLM hallucinating outputs by ensuring users' questions are only ever answered from ground truth KG data. We demonstrate the efficacy of LinkQ through a qualitative study with five KG practitioners. Our results indicate that practitioners find LinkQ effective for KG question-answering, and desire future LLM-assisted exploratory data analysis systems.","accessible_pdf":true,"authors":[{"affiliations":["MIT Lincoln Laboratory, Lexington, United States"],"email":"harry.li@ll.mit.edu","is_corresponding":true,"name":"Harry Li"},{"affiliations":["Tufts University, Medford, United States"],"email":"gabriel.appleby@gmail.com","is_corresponding":false,"name":"Gabriel Appleby"},{"affiliations":["MIT Lincoln Laboratory, Lexington, United States"],"email":"ashley.suh@ll.mit.edu","is_corresponding":false,"name":"Ashley Suh"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1193","image_caption":"Exemplar workflow for LinkQ, a system leveraging an LLM for refining natural language questions into knowledge graph queries. The (A) Chat Panel lets users communicate with the LLM to ask specific or open-ended questions. The Query Preview Panel consists of three components: the (B1) Query Editor, which supports interactive editing; the (B2) Entity-Relation Table, which provides mapped data IDs from the KG, helping to assess the correctness of the LLM's generated query; and the (B3) Query Graph, which visualizes the structure of the query to illustrate the underlying schema of the KG. Finally, the (C) Results Panel provides a cleaned, exportable table as well as an LLM-generated summary based on the query results. Importantly, LinkQ ensures all data retrieved and summarized by the LLM comes from ground truth in the KG. 
","keywords":["Knowledge graphs, large language models, query construction, question-answering, natural language interfaces."],"open_access_supplemental_link":"https://github.com/mit-ll/linkq","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2406.06621","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/L9rtmx-1eyM&t=0h48m51s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1193/v-short-1193_Preview.mp4?token=QWEmKWvXRK6Q35t4UEn04PKOw9ngniepfJ0NRx1_Fxk&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: AI and LLM","session_uid":"v-short","session_youtube_ff_id":"QfXSQxEjhuM","session_youtube_ff_link":"https://youtu.be/QfXSQxEjhuM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/L9rtmx-1eyM&t=0h48m51s","sessions":["Short Papers: AI and LLM"],"time_stamp":"2024-10-17T18:30:00Z","title":"LinkQ: An LLM-Assisted Visual Interface for Knowledge Graph Question-Answering","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1224","abstract":"Diffusion-based generative models\u2019 impressive ability to create convincing images has garnered global attention. However, their complex structures and operations often pose challenges for non-experts to grasp. We present Diffusion Explainer, the first interactive visualization tool that explains how Stable Diffusion transforms text prompts into images. Diffusion Explainer tightly integrates a visual overview of Stable Diffusion\u2019s complex structure with explanations of the underlying operations. By comparing image generation of prompt variants, users can discover the impact of keyword changes on image generation. A 56-participant user study demonstrates that Diffusion Explainer offers substantial learning benefits to non-experts. Our tool has been used by over 10,300 users from 124 countries at https://poloclub.github.io/diffusion-explainer/.","accessible_pdf":true,"authors":[{"affiliations":["Georgia Tech, Atlanta, United States"],"email":"seongmin@gatech.edu","is_corresponding":true,"name":"Seongmin Lee"},{"affiliations":["GA Tech, Atlanta, United States","IBM Research AI, Cambridge, United States"],"email":"benjamin.hoover@ibm.com","is_corresponding":false,"name":"Benjamin Hoover"},{"affiliations":["IBM Research AI, Cambridge, United States"],"email":"hendrik@strobelt.com","is_corresponding":false,"name":"Hendrik Strobelt"},{"affiliations":["Georgia Tech, Atlanta, United States"],"email":"jayw@gatech.edu","is_corresponding":false,"name":"Zijie J. 
Wang"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"speng65@gatech.edu","is_corresponding":false,"name":"ShengYun Peng"},{"affiliations":["Georgia Institute of Technology , Atlanta , United States"],"email":"apwright@gatech.edu","is_corresponding":false,"name":"Austin P Wright"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"kevin.li@gatech.edu","is_corresponding":false,"name":"Kevin Li"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"haekyu@gatech.edu","is_corresponding":false,"name":"Haekyu Park"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"alexanderyang@gatech.edu","is_corresponding":false,"name":"Haoyang Yang"},{"affiliations":["Georgia Tech, Atlanta, United States"],"email":"polo@gatech.edu","is_corresponding":false,"name":"Duen Horng (Polo) Chau"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1224","image_caption":"With Diffusion Explainer, users can visually examine how text prompt (e.g., \u201ca cute and adorable bunny... pixar character\u201d) is encoded by the Text Representation Generator into vectors to guide the Image Representation Refiner to iteratively refine the vector representation of the image being generated. The Timestep Controller enables users to review the incremental improvements in image quality and adherence to the prompt over timesteps. Diffusion Explainer tightly integrates a visual overview of Stable Diffusion\u2019s complex components with detailed explanations of their underlying operations, enabling users to fluidly transition between multiple levels of abstraction through animations and interactive elements.","keywords":["Machine Learning, Statistics, Modelling, and Simulation Applications; Software Prototype"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/pdf/2305.03509","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/L9rtmx-1eyM&t=0h10m0s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1224/v-short-1224_Preview.mp4?token=Gsc_ECc1b4rCDT_VV1OB8FzF3h6FXOzMo2Y0u5CfSmc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1224/v-short-1224_Preview.srt?token=2uEmmGpRHkM7GDs0bqg-aGpRfDyJcUhPq8zTQtGvo44&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short3","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: AI and LLM","session_uid":"v-short","session_youtube_ff_id":"1En1p1RBKr4","session_youtube_ff_link":"https://youtu.be/1En1p1RBKr4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/L9rtmx-1eyM&t=0h10m0s","sessions":["Short Papers: AI and LLM"],"time_stamp":"2024-10-17T17:54:00Z","title":"Diffusion Explainer: Visual Explanation for Text-to-image Stable Diffusion","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1057","abstract":"Real-world datasets often consist of quantitative and categorical variables. The analyst needs to focus on either kind separately or both jointly. 
We proposed a visualization technique tackling these challenges that supports visual cluster and set analysis. In this paper, we investigate how its visualization parameters affect the accuracy and speed of cluster and set analysis tasks in a controlled experiment. Our findings show that, with the proper settings, our visualization can support both task types well. However, we did not find settings suitable for the joint task, which provides opportunities for future research.","accessible_pdf":false,"authors":[{"affiliations":["TU Wien, Vienna, Austria"],"email":"nikolaus.piccolotto@tuwien.ac.at","is_corresponding":false,"name":"Nikolaus Piccolotto"},{"affiliations":["TU Wien, Vienna, Austria"],"email":"mwallinger@ac.tuwien.ac.at","is_corresponding":true,"name":"Markus Wallinger"},{"affiliations":["Institute of Visual Computing and Human-Centered Technology, Vienna, Austria"],"email":"miksch@ifs.tuwien.ac.at","is_corresponding":false,"name":"Silvia Miksch"},{"affiliations":["TU Wien, Vienna, Austria"],"email":"markus.boegl@tuwien.ac.at","is_corresponding":false,"name":"Markus B\u00f6gl"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1057","image_caption":"Our results show that layouts focused on multidimensional similarities supported a multidimensional cluster analysis task, layouts focused on set similarities supported set relation tasks, and neither layout supported the joint task well. ","keywords":["Visual cluster analysis, set visualization."],"open_access_supplemental_link":"https://osf.io/8gxzw/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://osf.io/zx9s6","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/jlbzvyg9IZc&t=0h0m54s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1057/v-short-1057_Preview.mp4?token=qKv-_jwVq8nvYMJYoaCoOCynyV0WJEeZi79eGjVM1Fw&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1057/v-short-1057_Preview.srt?token=f0HmYwM8JFtI6Mcg0_EBs7-Ig5iE6VS68TdE2a7LZvc&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short4","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Graph, Hierarchy and Multidimensional","session_uid":"v-short","session_youtube_ff_id":"ah4dt96Yo1M","session_youtube_ff_link":"https://youtu.be/ah4dt96Yo1M","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/jlbzvyg9IZc&t=0h0m54s","sessions":["Short Papers: Graph, Hierarchy and Multidimensional"],"time_stamp":"2024-10-16T12:30:00Z","title":"On Combined Visual Cluster and Set Analysis","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1065","abstract":"Although many dimensionality reduction (DR) techniques employ stochastic methods for computational efficiency, such as negative sampling or stochastic gradient descent, their impact on the projection has been underexplored. In this work, we investigate how such stochasticity affects the stability of projections and present a novel DR technique, GhostUMAP, to measure the pointwise instability of projections. Our idea is to introduce clones of data points, \u201cghosts\u201d, into UMAP\u2019s layout optimization process. 
Ghosts are designed to be completely passive: they do not affect any others but are influenced by attractive and repulsive forces from the original data points. After a single optimization run, GhostUMAP can capture the projection instability of data points by measuring the variance with the projected positions of their ghosts. We also present a successive halving technique to reduce the computation of GhostUMAP. Our results suggest that GhostUMAP can reveal unstable data points with a reasonable computational overhead.","accessible_pdf":true,"authors":[{"affiliations":["Sungkyunkwan University, Suwon, Korea, Republic of"],"email":"mw.jung@skku.edu","is_corresponding":true,"name":"Myeongwon Jung"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"takanori.fujiwara@liu.se","is_corresponding":false,"name":"Takanori Fujiwara"},{"affiliations":["Sungkyunkwan University, Suwon, Korea, Republic of"],"email":"jmjo@skku.edu","is_corresponding":false,"name":"Jaemin Jo"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1065","image_caption":"Each projection is part of a GhostUMAP projection generated for the CIFAR-10 dataset. Case (A) depicts the trajectories of a stable point where the original projection (blue cross) and its ghosts (blue triangles) are projected to a consistent location. In contrast, Case (B) shows the trajectories of an unstable point. The trajectories diverge, implying instability in the final projection of the point (orange cross).","keywords":["Dimensionality Reduction"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/jlbzvyg9IZc&t=0h55m43s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1065/v-short-1065_Preview.mp4?token=S4Ft136d_boh8u-gXn-wAdiz9Aw_YFeVRCGqkHB9JGk&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short4","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Graph, Hierarchy and Multidimensional","session_uid":"v-short","session_youtube_ff_id":"99IVMIqYnfA","session_youtube_ff_link":"https://youtu.be/99IVMIqYnfA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/jlbzvyg9IZc&t=0h55m43s","sessions":["Short Papers: Graph, Hierarchy and Multidimensional"],"time_stamp":"2024-10-16T13:24:00Z","title":"GhostUMAP: Measuring Pointwise Instability in Dimensionality Reduction","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1096","abstract":"Coordinated multiple views (CMV) in a visual analytics system can help users explore multiple data representations simultaneously with linked interactions. However, the implementation of coordinated multiple views can be challenging. Without standard software libraries, visualization designers need to re-implement CMV during the development of each system. We introduce use-coordination, a grammar and software library that supports the efficient implementation of CMV. The grammar defines a JSON-based representation for an abstract coordination model from the information visualization literature. 
We contribute an optional extension to the model and grammar that allows for hierarchical coordination. Through three use cases, we show that use-coordination enables implementation of CMV in systems containing not only basic statistical charts but also more complex visualizations such as medical imaging volumes. We describe six software extensions, including a graphical editor for manipulation of coordination, which showcase the potential to build upon our coordination-focused declarative approach. The software is open-source and available at https://use-coordination.dev.","accessible_pdf":true,"authors":[{"affiliations":["Harvard Medical School, Boston, United States"],"email":"mark_keller@hms.harvard.edu","is_corresponding":true,"name":"Mark S Keller"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"trevor_manz@g.harvard.edu","is_corresponding":false,"name":"Trevor Manz"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"nils@hms.harvard.edu","is_corresponding":false,"name":"Nils Gehlenborg"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1096","image_caption":"Our use-coordination approach streamlines the implementation of coordinated multiple views (CMV) by leveraging a declarative grammar and embracing modern reactive user interface development frameworks. Use-coordination is flexible because it is decoupled from any particular data type or visualization approach.","keywords":["Visualization toolkits, visual analytics, domain specific languages"],"open_access_supplemental_link":"https://doi.org/10.17605/OSF.IO/SEJN5","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://doi.org/10.31219/osf.io/vhs7m","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/jlbzvyg9IZc&t=1h4m28s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1096/v-short-1096_Preview.mp4?token=BkrEnuXnDc6Qq3r-mvF-Xx8BXFUnzobudxpjY1msKvc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1096/v-short-1096_Preview.srt?token=-1IIZRjdDEGawXQSUfwnaBL9Zb7s1YT8EiH2LF6Ov7Q&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short4","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Graph, Hierarchy and Multidimensional","session_uid":"v-short","session_youtube_ff_id":"yUeqo0sWUgU","session_youtube_ff_link":"https://youtu.be/yUeqo0sWUgU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/jlbzvyg9IZc&t=1h4m28s","sessions":["Short Papers: Graph, Hierarchy and Multidimensional"],"time_stamp":"2024-10-16T13:33:00Z","title":"Use-Coordination: Model, Grammar, and Library for Implementation of Coordinated Multiple Views","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1121","abstract":"Many real-world networks contain structurally-equivalent nodes. These are defined as vertices that share the same set of neighboring nodes, making them interchangeable with a traditional graph layout approach. However, many real-world graphs also have properties associated with nodes, adding additional meaning to them. 
We present an approach for swapping locations of structurally-equivalent nodes in graph layout so that those with more similar properties have closer proximity to each other. This improves the usefulness of the visualization from an attribute perspective without negatively impacting the visualization from a structural perspective. We include an algorithm for finding these sets of nodes in linear time, as well as methodologies for ordering nodes based on their attribute similarity, which works for scalar, ordinal, multidimensional, and categorical data.","accessible_pdf":false,"authors":[{"affiliations":["Pacific Northwest National Lab, Richland, United States"],"email":"patrick.mackey@pnnl.gov","is_corresponding":true,"name":"Patrick Mackey"},{"affiliations":["University of Arizona, Tucson, United States","Pacific Northwest National Laboratory, Richland, United States"],"email":"jacobmiller1@arizona.edu","is_corresponding":false,"name":"Jacob Miller"},{"affiliations":["Pacific Northwest National Laboratory, Richland, United States"],"email":"liz.f@pnnl.gov","is_corresponding":false,"name":"Liz Faultersack"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1121","image_caption":"An example of a property graph layout after having the structurally-equivalent nodes re-arranged based on their attribute similarity.","keywords":["graph drawing, network visualization, property graphs, attributed networks"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/jlbzvyg9IZc&t=0h19m12s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1121/v-short-1121_Preview.mp4?token=OUM0v9qW79f7TfsRaOeb77D6h02SwUbmQkWN-KwS6DE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1121/v-short-1121_Preview.srt?token=zTMNeUIQMHuumaHQigaiATn_3NTWlCLrcgsn3B4gIss&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short4","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Graph, Hierarchy and Multidimensional","session_uid":"v-short","session_youtube_ff_id":"JrF56KcFXuU","session_youtube_ff_link":"https://youtu.be/JrF56KcFXuU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/jlbzvyg9IZc&t=0h19m12s","sessions":["Short Papers: Graph, Hierarchy and Multidimensional"],"time_stamp":"2024-10-16T12:48:00Z","title":"Improving Property Graph Layouts by Leveraging Attribute Similarity for Structurally Equivalent Nodes","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1135","abstract":"Humans struggle to perceive and interpret high-dimensional data. Therefore, high-dimensional data are often projected into two dimensions for visualization. Many applications benefit from complex nonlinear dimensionality reduction techniques, but the effects of individual high-dimensional features are hard to explain in the two-dimensional space. Most visualization solutions use multiple two-dimensional plots, each showing the effect of one high-dimensional feature in two dimensions; this approach creates a need for a visual inspection of k plots for a k-dimensional input space. 
Our solution, Feature Clock, provides a novel approach that eliminates the need to inspect these k plots to grasp the influence of original features on the data structure depicted in two dimensions. Feature Clock enhances the explainability and compactness of visualizations of embedded data and is available in an open-source Python library.","accessible_pdf":true,"authors":[{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"ovcharenko.folga@gmail.com","is_corresponding":true,"name":"Olga Ovcharenko"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"rita.sevastjanova@inf.ethz.ch","is_corresponding":false,"name":"Rita Sevastjanova"},{"affiliations":["ETH Zurich, Z\u00fcrich, Switzerland"],"email":"valentina.boeva@inf.ethz.ch","is_corresponding":false,"name":"Valentina Boeva"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1135","image_caption":"Feature Clock uses high-dimensional data, and shows the largest contribution of each high-dimensional feature in two-dimensional space.","keywords":["High-dimensional data, nonlinear dimensionality reduction, feature importance, visualization"],"open_access_supplemental_link":"https://github.com/OlgaOvcharenko/feature_clock_visualization","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/jlbzvyg9IZc&t=0h37m27s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1135/v-short-1135_Preview.mp4?token=Uyqr0ftF16lo7feojMYgYvpEeIgXN8ryZJSrHS7eDK4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1135/v-short-1135_Preview.srt?token=kLhkJ66oQpLM2hA0d3adRQIDUWOKTCFpj0d4B-1jquU&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short4","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Graph, Hierarchy and Multidimensional","session_uid":"v-short","session_youtube_ff_id":"jKrGV7L6pFY","session_youtube_ff_link":"https://youtu.be/jKrGV7L6pFY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/jlbzvyg9IZc&t=0h37m27s","sessions":["Short Papers: Graph, Hierarchy and Multidimensional"],"time_stamp":"2024-10-16T13:06:00Z","title":"Feature Clock: High-Dimensional Effects in Two-Dimensional Plots","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1156","abstract":"Compound graphs are networks in which vertices can be grouped into larger subsets, with these subsets capable of further grouping, resulting in a nesting that can be many levels deep. In several applications, including biological workflows, chemical equations, and computational data flow analysis, these graphs often exhibit a tree-like nesting structure, where sibling clusters are disjoint. Common compound graph layouts prioritize the lowest level of the grouping, down to the individual ungrouped vertices, which can make the higher level grouped structures more difficult to discern, especially in deeply nested networks. 
Leveraging the additional structure of the tree-like nesting, we contribute an overview+detail layout for this class of compound graphs that preserves the saliency of the higher level network structure when groups are expanded to show internal nested structure. Our layout draws inner structures adjacent to their parents, using a modified tree layout to place substructures. We describe our algorithm and then present case studies demonstrating the layout's utility to a domain expert working on data flow analysis. Finally, we discuss network parameters and analysis situations in which our layout is well suited.","accessible_pdf":false,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"hatch.on27@gmail.com","is_corresponding":true,"name":"Chang Han"},{"affiliations":["University of Arizona, Tucson, United States"],"email":"lieffers@arizona.edu","is_corresponding":false,"name":"Justin Lieffers"},{"affiliations":["University of Arizona, Tucson, United States"],"email":"claytonm@arizona.edu","is_corresponding":false,"name":"Clayton Morrison"},{"affiliations":["The University of Utah, Salt Lake City, United States"],"email":"kisaacs@sci.utah.edu","is_corresponding":false,"name":"Katherine E. Isaacs"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1156","image_caption":"An illustration of our proposed variant of Reingold-Tilford algorithm. The input data is shown in both our layout and a tree view without inner structure. As we follow the RT bottom-up placement, we place group parents with respect to expanded children based on the position of their corresponding internal node. We then make separation passes in both directions of tree expansion.","keywords":["compound graphs, network layout, graph drawing, network visualization, graph visualization"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.04045","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/jlbzvyg9IZc&t=0h9m56s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1156/v-short-1156_Preview.mp4?token=i-sxI1vy8rJyeDsR7LjNQfCNMoewFjGySDcH_2WTyaI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1156/v-short-1156_Preview.srt?token=DCH2aWCrsMk4wZ-3G0MiS_Vw0M_6KHoo974zKrdhyT8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short4","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Graph, Hierarchy and Multidimensional","session_uid":"v-short","session_youtube_ff_id":"PlQT_Hpz0zg","session_youtube_ff_link":"https://youtu.be/PlQT_Hpz0zg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/jlbzvyg9IZc&t=0h9m56s","sessions":["Short Papers: Graph, Hierarchy and Multidimensional"],"time_stamp":"2024-10-16T12:39:00Z","title":"An Overview + Detail Layout for Visualizing Compound Graphs","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1173","abstract":"Visualizing citation relations with network structures is widely used, but the visual complexity can make it challenging for individual researchers to navigate through them. 
We collected data from 18 researchers using an interface that we designed using network simplification methods and analyzed how users browsed and identified important papers. Our analysis reveals six major patterns used for identifying papers of interest, which can be categorized into three key components: Fields, Bridges, and Foundations, each viewed from two distinct perspectives: layout-oriented and connection-oriented. The connection-oriented approach was found to be more reliable for selecting relevant papers, but the layout-oriented method was adopted more often, even though it led to unexpected results and user frustration. Our findings emphasize the importance of integrating these components and the necessity to balance visual layouts with meaningful connections to enhance the effectiveness of citation networks in academic browsing systems.","accessible_pdf":true,"authors":[{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"krchoe@hcil.snu.ac.kr","is_corresponding":true,"name":"Kiroong Choe"},{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"gracekim027@snu.ac.kr","is_corresponding":false,"name":"Eunhye Kim"},{"affiliations":["Dept. of Electrical and Computer Engineering, SNU, Seoul, Korea, Republic of"],"email":"paulmoguri@snu.ac.kr","is_corresponding":false,"name":"Sangwon Park"},{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"jseo@snu.ac.kr","is_corresponding":false,"name":"Jinwook Seo"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1173","image_caption":"We identified six patterns that researchers utilize to browse citation networks and discover papers of interest. Component-wise, these patterns can be classified to: Field (i.e., related papers on a single research topic), Bridge (i.e., logical connections between papers or topics), and Foundation (i.e., stages in the broad development of research). For each component, there were two different perspectives: layout-oriented or connection-oriented. 
Our analysis suggests that researchers generally preferred the layout-oriented perspective for its intuitiveness, but papers identified through the connection-oriented perspective were typically more useful.","keywords":["Literature search, network visualization"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2405.07267","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/jlbzvyg9IZc&t=0h28m32s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1173/v-short-1173_Preview.mp4?token=yxKADsYdJzT9lfoXD_QC0ilL3z2091KwudYj09XOTmw&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1173/v-short-1173_Preview.srt?token=f_Xk6ld97NrVlsDS6hylB1ssCIBKRRFAsDWK07vQ5Nw&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short4","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Graph, Hierarchy and Multidimensional","session_uid":"v-short","session_youtube_ff_id":"AlqlP1Rto84","session_youtube_ff_link":"https://youtu.be/AlqlP1Rto84","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/jlbzvyg9IZc&t=0h28m32s","sessions":["Short Papers: Graph, Hierarchy and Multidimensional"],"time_stamp":"2024-10-16T12:57:00Z","title":"Fields, Bridges, and Foundations: How Researchers Browse Citation Network Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1235","abstract":"A high number of samples often leads to occlusion in scatterplots, which hinders data perception and analysis. De-cluttering approaches based on spatial transformation reduce visual clutter by remapping samples using the entire available scatterplot domain. Such regularized scatterplots may still be used for data analysis tasks, if the spatial transformation is smooth and preserves the original neighborhood relations of samples. Recently, Rave et al. proposed an efficient regularization method based on integral images. We propose a generalization of their regularization scheme using sector-based transformations with the aim of increasing sample uniformity of the resulting scatterplot. We document the improvement of our approach using various uniformity measures.","accessible_pdf":false,"authors":[{"affiliations":["University of M\u00fcnster, M\u00fcnster, Germany"],"email":"hennes.rave@uni-muenster.de","is_corresponding":true,"name":"Hennes Rave"},{"affiliations":["University of M\u00fcnster, M\u00fcnster, Germany"],"email":"molchano@uni-muenster.de","is_corresponding":false,"name":"Vladimir Molchanov"},{"affiliations":["University of M\u00fcnster, M\u00fcnster, Germany"],"email":"linsen@uni-muenster.de","is_corresponding":false,"name":"Lars Linsen"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1235","image_caption":"Sector-based transformation of a UMAP embedding of the Iris dataset. 16 sectors and anchor points for a selected sample are shown for the original scatterplot. The black anchor point at the bottom belongs to the highlighted sector at the top. Samples are moved toward a sector's anchor point based on the point density inside that sector. 
The resulting displacement vector is shown in blue.","keywords":["Scatterplot de-cluttering, spatial transformation."],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/jlbzvyg9IZc&t=0h47m15s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1235/v-short-1235_Preview.mp4?token=cvnyCz3mlT9aNamwFo0HrmecGd8sSWZosP2uo4-O4go&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short4","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Graph, Hierarchy and Multidimensional","session_uid":"v-short","session_youtube_ff_id":"CF_fK_gXpZU","session_youtube_ff_link":"https://youtu.be/CF_fK_gXpZU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/jlbzvyg9IZc&t=0h47m15s","sessions":["Short Papers: Graph, Hierarchy and Multidimensional"],"time_stamp":"2024-10-16T13:15:00Z","title":"Uniform Sample Distribution in Scatterplots via Sector-based Transformation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1049","abstract":"This comparative study evaluates various neural surface reconstruction methods, particularly focusing on their implications for scientific visualization through reconstructing 3D surfaces via multi-view rendering images. We categorize ten methods into neural radiance fields and neural implicit surfaces, uncovering the benefits of leveraging distance functions (i.e., SDFs and UDFs) to enhance the accuracy and smoothness of the reconstructed surfaces. Our findings highlight the efficiency and quality of NeuS2 for reconstructing closed surfaces and identify NeUDF as a promising candidate for reconstructing open surfaces despite some limitations. By sharing our benchmark dataset, we invite researchers to test the performance of their methods, contributing to the advancement of surface reconstruction solutions for scientific visualization.","accessible_pdf":true,"authors":[{"affiliations":["University of Notre Dame, Notre Dame, United States"],"email":"syao2@nd.edu","is_corresponding":true,"name":"Siyuan Yao"},{"affiliations":["Wuhan University, Wuhan, China"],"email":"song.wx@whu.edu.cn","is_corresponding":false,"name":"Weixi Song"},{"affiliations":["University of Notre Dame, Notre Dame, United States"],"email":"chaoli.wang@nd.edu","is_corresponding":false,"name":"Chaoli Wang"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1049","image_caption":"We selected 10 representative surface reconstruction methods and created 9 datasets for evaluation. Each dataset comprises 42 images for training and 181 images for testing. After training the models, we used them to generate neural surface rendering images and reconstruct surface polygon meshes. The synthesized results were evaluated using peak signal-to-noise ratio (PSNR), learned perceptual image patch similarity (LPIPS) against ground truth images, and chamfer distance against the ground truth surface mesh. 
We also comprehensively analyzed the results, including model design and performance.","keywords":["Machine Learning Techniques, Datasets"],"open_access_supplemental_link":"https://www.kaggle.com/datasets/syaond/scivis-surface-dataset/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2407.20868","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/__o_dwELzN8&t=0h28m20s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1049/v-short-1049_Preview.mp4?token=uoTYztiyhPmlmrydoqmYzohPfMPzTYBsVUdGvLxD_WQ&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1049/v-short-1049_Preview.srt?token=LrchUQH0UK--wYWJWgdYgRt-EeJDWRM6Hl2rKh5bRrs&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Scientific and Immersive Visualization","session_uid":"v-short","session_youtube_ff_id":"gC0jSUB5PvU","session_youtube_ff_link":"https://youtu.be/gC0jSUB5PvU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/__o_dwELzN8&t=0h28m20s","sessions":["Short Papers: Scientific and Immersive Visualization"],"time_stamp":"2024-10-16T16:27:00Z","title":"A Comparative Study of Neural Surface Reconstruction for Scientific Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1054","abstract":"Direct volume rendering using ray-casting is widely used in practice. By using GPUs and applying acceleration techniques as empty space skipping, high frame rates are possible on modern hardware.This enables performance-critical use-cases such as virtual reality volume rendering. The currently fastest known technique uses volumetric distance maps to skip empty sections of the volume during ray-casting but requires the distance map to be updated per transfer function change. In this paper, we demonstrate a technique for subdividing the volume intensity range into partitions and deriving what we call partitioned distance maps. These can be used to accelerate the distance map computation for a newly changed transfer function by a factor up to 30. This allows the currently fastest known empty space skipping approach to be used while maintaining high frame rates even when the transfer function is changed frequently.","accessible_pdf":true,"authors":[{"affiliations":["University of Applied Sciences Wiener Neustadt, Wiener Neustadt, Austria"],"email":"michael.rauter@fhwn.ac.at","is_corresponding":true,"name":"Michael Rauter"},{"affiliations":["Medical University of Vienna, Vienna, Austria"],"email":"lukas.a.zimmermann@meduniwien.ac.at","is_corresponding":false,"name":"Lukas Zimmermann"},{"affiliations":["University of Applied Sciences Wiener Neustadt, Wiener Neustadt, Austria"],"email":"markus.zeilinger@fhwn.ac.at","is_corresponding":false,"name":"Markus Zeilinger"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1054","image_caption":"Direct volume renderings of the manix dataset applying distinct transfer functions. Distance map based empty space skipping can be used to accelerate rendering. 
Different transfer functions result in different distance maps as indicated in the image. Therefore, it is required to recompute the distance map on a transfer function update. In the paper, we demonstrate how to compute the distance map faster than before by computing what we call partitioned distance maps as a preprocessing step, and combining them into the final distance map at runtime.","keywords":["Computing methodologies\u2014Computer graphics\u2014Rendering, Theory of computation\u2014Design and analysis of algorithms\u2014Data structures design and analysis."],"open_access_supplemental_link":"https://osf.io/n5k6z","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2407.21552","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/__o_dwELzN8&t=0h0m20s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1054/v-short-1054_Preview.mp4?token=cb8cLTNiWrnvMHsDzo4kJfGqvCTgwuvoHALUniFGIR8&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Scientific and Immersive Visualization","session_uid":"v-short","session_youtube_ff_id":"De6SwX2KSV4","session_youtube_ff_link":"https://youtu.be/De6SwX2KSV4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/__o_dwELzN8&t=0h0m20s","sessions":["Short Papers: Scientific and Immersive Visualization"],"time_stamp":"2024-10-16T16:00:00Z","title":"Accelerating Transfer Function Update for Distance Map based Volume Rendering","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1119","abstract":"Analyzing uncertainty in spatial data is a vital task in many domains, as for example with climate and weather simulation ensembles. Although many methods support the analysis of uncertain 2D data, such as uncertain isocontours or overlaying of statistical information on plots of the actual data, it is still a challenge to get a more detailed overview of 2D data together with its statistical properties. We present cumulative height fields, a visualization method for 2D scalar field ensembles using the marginal empirical distribution function and show preliminary results using volume rendering and slicing for the Max Planck Institute Grand Ensemble.","accessible_pdf":false,"authors":[{"affiliations":["Institute of Computer Science, Leipzig University, Leipzig, Germany"],"email":"daetz@informatik.uni-leipzig.de","is_corresponding":true,"name":"Tomas Daetz"},{"affiliations":["German Climate Computing Center (DKRZ), Hamburg, Germany"],"email":"boettinger@dkrz.de","is_corresponding":false,"name":"Michael B\u00f6ttinger"},{"affiliations":["Leipzig University, Leipzig, Germany"],"email":"scheuermann@informatik.uni-leipzig.de","is_corresponding":false,"name":"Gerik Scheuermann"},{"affiliations":["Leipzig University, Leipzig, Germany"],"email":"heine@informatik.uni-leipzig.de","is_corresponding":false,"name":"Christian Heine"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1119","image_caption":"Precipitation change (%) in 2080-2099 relative to 1986-2005 based on 100 simulation runs of the RCP8.5 scenario within MPI-GE. 
(a) shows a direct volume rendering of the cumulative height field using a 2D transfer function, mapping cumulative probabilities to opacity and precipitation change to color (blue: increase, red: decrease), and an isosurface of the median. (d) shows an orthographic view from the top. The intersection of the black lines show the point of interest (0\u00b0, 170\u00b0W). (b) and (c) show the cumulative function graphs along each component of the point of interest. The purple lines depict the zero percent difference. ","keywords":["Scalar field visualization, ensemble visualization, volume rendering, nonparametric statistics."],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/__o_dwELzN8&t=0h37m10s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1119/v-short-1119_Preview.mp4?token=it5hcr8mic5-t_R9wL8DHtwmG8pL9-YBm8UwxJG2i7A&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1119/v-short-1119_Preview.srt?token=pa9nm3tAGNmlCt9qIZjWGXj1IRf6yQ7cyItR1aCBEBg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Scientific and Immersive Visualization","session_uid":"v-short","session_youtube_ff_id":"muHSHH_zJK8","session_youtube_ff_link":"https://youtu.be/muHSHH_zJK8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/__o_dwELzN8&t=0h37m10s","sessions":["Short Papers: Scientific and Immersive Visualization"],"time_stamp":"2024-10-16T16:36:00Z","title":"Visualization of 2D Scalar Field Ensembles Using Volume Visualization of the Empirical Distribution Function","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1127","abstract":"In this paper, we analyze the Apple Vision Pro hardware and the visionOS software platform, assessing their capabilities for volume rendering of structured grids---a prevalent technique across various applications. The Apple Vision Pro supports multiple display modes, from classical augmented reality (AR) using video see-through technology to immersive virtual reality (VR) environments that exclusively render virtual objects. These modes utilize different APIs and exhibit distinct capabilities. Our focus is on direct volume rendering, selected for its implementation challenges due to the native graphics APIs being predominantly oriented towards surface shading. Volume rendering is particularly vital in fields where AR and VR visualizations offer substantial benefits, such as in medicine and manufacturing. Despite its initial high cost, we anticipate that the Vision Pro will become more accessible and affordable over time, following Apple's track record of market expansion. 
As these devices become more prevalent, understanding how to effectively program and utilize them becomes increasingly important, offering significant opportunities for innovation and practical applications in various sectors.","accessible_pdf":false,"authors":[{"affiliations":["University of Duisburg-Essen, Duisburg, Germany"],"email":"camilla.hrycak@uni-due.de","is_corresponding":true,"name":"Camilla Hrycak"},{"affiliations":["University of Duisburg-Essen, Duisburg, Germany"],"email":"david.lewakis@stud.uni-due.de","is_corresponding":false,"name":"David Lewakis"},{"affiliations":["University of Duisburg-Essen, Duisburg, Germany"],"email":"jens.krueger@uni-due.de","is_corresponding":false,"name":"Jens Harald Krueger"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1127","image_caption":"Screenshots of our testbed direct volume rendering application on the Apple Vision Pro. From Top: Slice-based volume rendering in a shared space with video see-through, f Bottom: Rendering the dataset in a fully immersive space. Notice varying image quality across the figures due to active foveation.","keywords":["Apple Vision Pro, Volume Rendering, Virtual Reality, Augmented Reality"],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://www.cgvis.de/publications/2024/hrycak_2024_vision1.pdf","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/__o_dwELzN8&t=0h19m5s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1127/v-short-1127_Preview.mp4?token=EF0pYVl1SFuGLoQVOYV5qGbXuSquoNn8ED0zBmwTrwY&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1127/v-short-1127_Preview.srt?token=GGFYa8zsYzU02kMeu2J9cBwMjRY471VZhuaEsPlx76w&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Scientific and Immersive Visualization","session_uid":"v-short","session_youtube_ff_id":"C09ujoXAnWg","session_youtube_ff_link":"https://youtu.be/C09ujoXAnWg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/__o_dwELzN8&t=0h19m5s","sessions":["Short Papers: Scientific and Immersive Visualization"],"time_stamp":"2024-10-16T16:18:00Z","title":"Investigating the Apple Vision Pro Spatial Computing Platform for GPU-Based Volume Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1155","abstract":"Augmented reality (AR) area labels can visualize real world regions with arbitrary boundaries and show invisible objects or features. But environment conditions such as lighting and clutter can decrease fixed or passive label visibility, and labels that have high opacity levels can occlude crucial details in the environment. We design and evaluate active AR area label visualization modes to enhance visibility across real-life environments, while still retaining environment details within the label. For this, we define a distant characteristic color from the environment in perceptual CIELAB space, then introduce spatial variations among label pixel colors based on the underlying environment variation. 
In a user study with 18 participants, we found that our active label visualization modes can be comparable in visibility to a fixed green baseline by Gabbard et al., and can outperform it with added spatial variation in cluttered environments, across varying levels of lighting (e.g., nighttime), and in environments with colors similar to the fixed baseline color.","accessible_pdf":false,"authors":[{"affiliations":["Brown University, Providence, United States"],"email":"hojung_kwon@brown.edu","is_corresponding":true,"name":"Hojung Kwon"},{"affiliations":["Brown University, Providence, United States"],"email":"yuanbo_li@brown.edu","is_corresponding":false,"name":"Yuanbo Li"},{"affiliations":["Brown University, Providence, United States"],"email":"chloe_ye2019@hotmail.com","is_corresponding":false,"name":"Xiaohan Ye"},{"affiliations":["Brown University, Providence, United States"],"email":"praccho_muna-mcquay@brown.edu","is_corresponding":false,"name":"Praccho Muna-McQuay"},{"affiliations":["Duke University, Durham, United States"],"email":"liuren.yin@duke.edu","is_corresponding":false,"name":"Liuren Yin"},{"affiliations":["Brown University, Providence, United States"],"email":"james_tompkin@brown.edu","is_corresponding":false,"name":"James Tompkin"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1155","image_caption":"Top left: If an AR area label has a similar color to the environment, we cannot easily see the label. Top right: If the label is too opaque, it occludes the environment. Bottom left: We automatically change label colors to increase visibility. Bottom right: We add spatial variation within a label to reduce background occlusion. (Background image source: Dubai360, 8K 360 Degree Timelapse of Dubai Marina) ","keywords":["Augmented reality, active labels, environment-adaptive"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/__o_dwELzN8&t=1h4m35s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1155/v-short-1155_Preview.mp4?token=dpGKqhvLa6sMi3Edw8lp-dvqWiXCdIARUWf0TC-HX84&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1155/v-short-1155_Preview.srt?token=iYyTyQyGsTVOYivhq8Te7feVuQfNyhsknqjkGJdIT5k&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Scientific and Immersive Visualization","session_uid":"v-short","session_youtube_ff_id":"O978Fqk58Fw","session_youtube_ff_link":"https://youtu.be/O978Fqk58Fw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/__o_dwELzN8&t=1h4m35s","sessions":["Short Papers: Scientific and Immersive Visualization"],"time_stamp":"2024-10-16T17:03:00Z","title":"Active Appearance and Spatial Variation Can Improve Visibility in Area Labels for Augmented Reality","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1183","abstract":"An atmospheric front is an imaginary surface that separates two distinct air masses and is commonly defined as the warm-air side of a frontal zone with high gradients of atmospheric temperature and humidity (Fig. 
1, left). These fronts are a widely used conceptual model in meteorology, which are often encountered in the literature as two-dimensional (2D) front lines on surface analysis charts. This paper presents a method for computing three-dimensional (3D) atmospheric fronts as surfaces that is capable of extracting continuous and well-confined features suitable for 3D visual analysis, spatio- temporal tracking, and statistical analyses (Fig. 1, middle, right). Recently developed contour-based methods for 3D front extraction rely on computing the third derivative of a moist potential temperature field. Additionally, they require the field to be smoothed to obtain continuous large-scale structures. This paper demonstrates the feasibility of an alternative method to front extraction using ridge surface computation. The proposed method requires only the second derivative of the input field and produces accurate structures even from unsmoothed data. An application of the ridge-based method to a data set corresponding to Cyclone Friederike demonstrates its benefits and utility towards visual analysis of the full 3D structure of fronts.","accessible_pdf":false,"authors":[{"affiliations":["Zuse Institute Berlin, Berlin, Germany"],"email":"anne.gossing@fu-berlin.de","is_corresponding":true,"name":"Anne Gossing"},{"affiliations":["Universit\u00e4t Hamburg, Hamburg, Germany"],"email":"andreas.beckert@uni-hamburg.de","is_corresponding":false,"name":"Andreas Beckert"},{"affiliations":["Universit\u00e4t Hamburg, Hamburg, Germany"],"email":"christoph.fischer-1@uni-hamburg.de","is_corresponding":false,"name":"Christoph Fischer"},{"affiliations":["Zuse Institute Berlin, Berlin, Germany"],"email":"klenert@zib.de","is_corresponding":false,"name":"Nicolas Klenert"},{"affiliations":["Indian Institute of Science, Bangalore, India"],"email":"vijayn@iisc.ac.in","is_corresponding":false,"name":"Vijay Natarajan"},{"affiliations":["Freie Universit\u00e4t Berlin, Berlin, Germany"],"email":"george.pacey@fu-berlin.de","is_corresponding":false,"name":"George Pacey"},{"affiliations":["Universit\u00e4t Hamburg, Hamburg, Germany"],"email":"thorwin.vogt@uni-hamburg.de","is_corresponding":false,"name":"Thorwin Vogt"},{"affiliations":["Universit\u00e4t Hamburg, Hamburg, Germany"],"email":"marc.rautenhaus@uni-hamburg.de","is_corresponding":false,"name":"Marc Rautenhaus"},{"affiliations":["Zuse Institute Berlin, Berlin, Germany"],"email":"baum@zib.de","is_corresponding":false,"name":"Daniel Baum"}],"award":"honorable","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1183","image_caption":"Atmospheric fronts play a significant role in mid-latitude weather dynamics and are responsible for 50% - and locally up to 90% - of extreme precipitation. To support visual analysis of frontal processes, in this paper we present a ridge-based approach for the extraction and visualization of three-dimensional atmospheric fronts. Current contour-based visualization techniques require data smoothing that can lead to local inaccuracies, whereas our ridge detection algorithm extracts fronts as continuous surfaces without smoothing. This preserves the original data resolution, thereby facilitating the investigation of small-scale processes in frontal environments. 
","keywords":["Atmospheric front, ridge surface, visual analysis."],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/__o_dwELzN8&t=0h9m31s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1183/v-short-1183_Preview.mp4?token=07DX41XUOHJ0wsi8xtxI1j34lYN1AbYb3Uz6Cb4WH70&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Scientific and Immersive Visualization","session_uid":"v-short","session_youtube_ff_id":"G6iZGuhjBf4","session_youtube_ff_link":"https://youtu.be/G6iZGuhjBf4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/__o_dwELzN8&t=0h9m31s","sessions":["Short Papers: Scientific and Immersive Visualization"],"time_stamp":"2024-10-16T16:09:00Z","title":"A Ridge-based Approach for Extraction and Visualization of 3D Atmospheric Fronts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1211","abstract":"Transfer function design is crucial in volume rendering, as it directly influences the visual representation and interpretation of volumetric data. However, creating effective transfer functions that align with users' visual objectives is often challenging due to the complex parameter space and the semantic gap between transfer function values and features of interest within the volume. In this work, we propose a novel approach that leverages recent advancements in language-vision models to bridge this semantic gap. By employing a fully differentiable rendering pipeline and an image-based loss function guided by language descriptions, our method generates transfer functions that yield volume-rendered images closely matching the user's intent. We demonstrate the effectiveness of our approach in creating meaningful transfer functions from simple descriptions, empowering users to intuitively express their desired visual outcomes with minimal effort. This advancement streamlines the transfer function design process and makes volume rendering more accessible to a wider range of users.","accessible_pdf":true,"authors":[{"affiliations":["Vanderbilt University, Nashville, United States"],"email":"sangwon.jeong@vanderbilt.edu","is_corresponding":true,"name":"Sangwon Jeong"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"jixianli@sci.utah.edu","is_corresponding":false,"name":"Jixian Li"},{"affiliations":["Lawrence Livermore National Laboratory , Livermore, United States"],"email":"shusenl@sci.utah.edu","is_corresponding":false,"name":"Shusen Liu"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"crj@sci.utah.edu","is_corresponding":false,"name":"Chris R. Johnson"},{"affiliations":["Vanderbilt University, Nashville, United States"],"email":"matthew.berger@vanderbilt.edu","is_corresponding":false,"name":"Matthew Berger"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1211","image_caption":"A gallery of volume renderings found using Text-2-Transfer Function method. 
Our method can produce transfer functions focusing on various visual properties such as color, material, or abstract concepts such as \u201ccinematic.\u201d","keywords":["Transfer function design, vision-language model"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2406.15634","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/__o_dwELzN8&t=0h46m21s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1211/v-short-1211_Preview.mp4?token=Dxy0WyVTUlGxl3ru8PfJoSVnbTBqd4KZ9Cm8gWcfLCk&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1211/v-short-1211_Preview.srt?token=zX4eGNJo_w-PwbJLOF9-awAxMGW1LNJqLUpOiqCvdig&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Scientific and Immersive Visualization","session_uid":"v-short","session_youtube_ff_id":"wtl-zKpboLg","session_youtube_ff_link":"https://youtu.be/wtl-zKpboLg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/__o_dwELzN8&t=0h46m21s","sessions":["Short Papers: Scientific and Immersive Visualization"],"time_stamp":"2024-10-16T16:45:00Z","title":"Text-based transfer function design for semantic volume rendering","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1292","abstract":"Collaborative planning for congenital heart diseases typically involves creating physical heart models through 3D printing, which are then examined by both surgeons and cardiologists. Recent developments in mobile augmented reality (AR) technologies have presented a viable alternative, known for their ease of use and portability. However, there is still a lack of research examining the utilization of multi-user mobile AR environments to support collaborative planning for cardiovascular surgeries. We created ARCollab, an iOS AR app designed for enabling multiple surgeons and cardiologists to interact with a patient's 3D heart model in a shared environment. ARCollab enables surgeons and cardiologists to import heart models, manipulate them through gestures and collaborate with other users, eliminating the need for fabricating physical heart models. Our evaluation of ARCollab's usability and usefulnessin enhancing collaboration, conducted with three cardiothoracic surgeons and two cardiologists, marks the first human evaluation of a multi-user mobile AR tool for surgical planning. 
ARCollab is open-source, available at https://github.com/poloclub/arcollab.","accessible_pdf":true,"authors":[{"affiliations":["Georgia Tech, Atlanta, United States"],"email":"pratham.mehta001@gmail.com","is_corresponding":true,"name":"Pratham Darrpan Mehta"},{"affiliations":["Georgia Tech, Atlanta, United States"],"email":"rnarayanan39@gatech.edu","is_corresponding":false,"name":"Rahul Ozhur Narayanan"},{"affiliations":["Georgia Tech, Atlanta, United States"],"email":"harsha5431@gmail.com","is_corresponding":false,"name":"Harsha Karanth"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"alexanderyang@gatech.edu","is_corresponding":false,"name":"Haoyang Yang"},{"affiliations":["Emory University, Atlanta, United States"],"email":"slesnickt@kidsheart.com","is_corresponding":false,"name":"Timothy C Slesnick"},{"affiliations":["Emory University/Children's Healthcare of Atlanta, Atlanta, United States"],"email":"fawwaz.shaw@choa.org","is_corresponding":false,"name":"Fawwaz Shaw"},{"affiliations":["Georgia Tech, Atlanta, United States"],"email":"polo@gatech.edu","is_corresponding":false,"name":"Duen Horng (Polo) Chau"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1292","image_caption":"ARCollab is a collaborative cardiovascular surgical planning application in mobile augmented reality. Multiple users can join a shared session and view a patient's 3D heart model from different perspectives. ARCollab allows surgeons and cardiologists to collaboratively interact with a 3D heart model in real-time. Our evaluation of ARCollab's usability and usefulness in enhancing collaboration, conducted with three cardiothoracic surgeons and two cardiologists, marks the first human evaluation of a multi-user mobile AR tool for surgical planning. ARCollab is open-source, available at https://github.com/poloclub/arcollab. 
","keywords":["Augmented Reality, Mobile Collaboration, Surgical Planning"],"open_access_supplemental_link":"https://github.com/poloclub/arcollab","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.03249","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/__o_dwELzN8&t=0h56m10s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1292/v-short-1292_Preview.mp4?token=JCa98Nvp5ZJr4ODeYdcidV_gsxCoxlhfyo11Q6ciMY0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1292/v-short-1292_Preview.srt?token=qGpinD0qCxaulavLG2bfshSvMgq5n_bKTX0HAb2kwAs&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Scientific and Immersive Visualization","session_uid":"v-short","session_youtube_ff_id":"iZMV5ADTBO4","session_youtube_ff_link":"https://youtu.be/iZMV5ADTBO4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/__o_dwELzN8&t=0h56m10s","sessions":["Short Papers: Scientific and Immersive Visualization"],"time_stamp":"2024-10-16T16:54:00Z","title":"Multi-User Mobile Augmented Reality for Cardiovascular Surgical Planning","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1059","abstract":"Gantt charts are a widely-used idiom for visualizing temporal discrete event sequence data where dependencies exist between events. They are popular in domains such as manufacturing and computing for their intuitive layout of such data. However, these domains frequently generate data at scales which tax both the visual representation and the ability to render it at interactive speeds. To aid visualization developers who use Gantt charts in these situations, we develop a task taxonomy of low level visualization tasks supported by Gantt charts and connect them to the data queries needed to support them. Our taxonomy is derived through a literature survey of visualizations using Gantt charts over the past 30 years.","accessible_pdf":true,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"sayefsakin@sci.utah.edu","is_corresponding":true,"name":"Sayef Azad Sakin"},{"affiliations":["The University of Utah, Salt Lake City, United States"],"email":"kisaacs@sci.utah.edu","is_corresponding":false,"name":"Katherine E. Isaacs"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1059","image_caption":"Gantt charts are popular in project planning, process scheduling, and progress tracking for visualizing interdependent temporal event sequences. Typically, data is organized by temporal order on one axis and the other by grouping events with relevant factors. Our literature-based visualization task taxonomy helps in designing Gantt charts with large number of events by aligning prevalent visual tasks with relevant data queries. 
These provide a foundation for identifying and developing data management strategies to scale up visual interactivity in Gantt Charts.","keywords":["Gantt chart\u2014Visualization\u2014Task taxonomy"],"open_access_supplemental_link":"https://osf.io/8k79r/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.04050","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Dx83B4g1W5A&t=0h45m30s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1059/v-short-1059_Preview.mp4?token=AA-7sRm2dRmgNqzQ9NbGsKAYKGiJVgGMXQzn8vK3ySo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1059/v-short-1059_Preview.srt?token=iIARTsHoNlynFZ_r5Ktv12EVvzBs79LzQgF7UYJ4jCg&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short6","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Perception and Representation","session_uid":"v-short","session_youtube_ff_id":"a85BN_1AgEE","session_youtube_ff_link":"https://youtu.be/a85BN_1AgEE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Dx83B4g1W5A&t=0h45m30s","sessions":["Short Papers: Perception and Representation"],"time_stamp":"2024-10-17T13:15:00Z","title":"A Literature-based Visualization Task Taxonomy for Gantt Charts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1072","abstract":"Recent advancements in vision models have greatly improved their ability to handle complex chart understanding tasks, like chart captioning and question answering. However, it remains challenging to assess how these models process charts. Existing benchmarks only roughly evaluate model performance without evaluating the underlying mechanisms, such as how models extract image embeddings. This limits our understanding of the model's ability to perceive fundamental graphical components. To address this, we introduce a novel evaluation framework to assess the graphical perception of image embedding models. For chart comprehension, we examine two main aspects of channel effectiveness: accuracy and discriminability of various visual channels. Channel accuracy is assessed through the linearity of embeddings, measuring how well the perceived magnitude aligns with the size of the stimulus. Discriminability is evaluated based on the distances between embeddings, indicating their distinctness. Our experiments with the CLIP model show that it perceives channel accuracy differently from humans and shows unique discriminability in channels like length, tilt, and curvature. 
We aim to develop this work into a broader benchmark for reliable visual encoders, enhancing models for precise chart comprehension and human-like perception in future applications.","accessible_pdf":false,"authors":[{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"dtngus0111@gmail.com","is_corresponding":true,"name":"Soohyun Lee"},{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"jangsus1@snu.ac.kr","is_corresponding":false,"name":"Minsuk Chang"},{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"shpark@hcil.snu.ac.kr","is_corresponding":false,"name":"Seokhyeon Park"},{"affiliations":["Seoul National University, Seoul, Korea, Republic of"],"email":"jseo@snu.ac.kr","is_corresponding":false,"name":"Jinwook Seo"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1072","image_caption":"An image showing how differently the image embedding model perceives changes in different visual channels. Peaks represent thresholds where the model perceives significant differences between images, indicating the discriminability of each channel.","keywords":["Graphical perception, channel effectiveness, image embeddings, clip"],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2407.20845","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Dx83B4g1W5A&t=0h26m40s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1072/v-short-1072_Preview.mp4?token=oWDGK7OGFYWlhpETPpd7Cs6OTMqdpsTMLkMzxcdeBcE&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short6","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Perception and Representation","session_uid":"v-short","session_youtube_ff_id":"1o5g7_3J40g","session_youtube_ff_link":"https://youtu.be/1o5g7_3J40g","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Dx83B4g1W5A&t=0h26m40s","sessions":["Short Papers: Perception and Representation"],"time_stamp":"2024-10-17T12:57:00Z","title":"Assessing Graphical Perception of Image Embedding Models using Channel Effectiveness","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1081","abstract":"Sine illusion happens when the more quickly changing pairs of lines lead to bigger underestimates of the delta between them.We evaluate three visual manipulations on mitigating sine illusions: dotted lines, aligned gridlines, and offset gridlines via a user study. We asked participants to compare the deltas between two lines at two time points and found aligned gridlines to be the most effective in mitigating sine illusions.Using data from the user study, we produced a model that predicts the impact of the sine illusion in line charts by accounting for the ratio of the vertical distance between the two points of comparison. When the ratio is less than 50\\%, participants begin to be influenced by the sine illusion. 
This effect can be significantly exacerbated when the difference between the two deltas falls under 30\\%.We compared two explanations for the sine illusion based on our data: either participants were mistakenly using the perpendicular distance between the two lines to make their comparison (the perpendicular explanation), or they incorrectly relied on the length of the line segment perpendicular to the angle bisector of the bottom and top lines (the equal triangle explanation). We found the equal triangle explanation to be the more predictive model explaining participant behaviors.","accessible_pdf":true,"authors":[{"affiliations":["Google LLC, San Francisco, United States"],"email":"cknit1999@gmail.com","is_corresponding":false,"name":"Clayton J Knittel"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"jawuah3@gatech.edu","is_corresponding":false,"name":"Jane Awuah"},{"affiliations":["Northwestern University, Evanston, United States"],"email":"franconeri@northwestern.edu","is_corresponding":false,"name":"Steven L Franconeri"},{"affiliations":["Georgia Tech, Atlanta, United States"],"email":"cxiong@gatech.edu","is_corresponding":true,"name":"Cindy Xiong Bearfield"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1081","image_caption":"Looking at this visualization of two lines depicting the revenue of two products over time. Product A is consistently doing better than Product B, and thus have higher revenue throughout time. Both products' revenue are growing, with their line slopes increasing over time. Your task it to compare whether the difference between their revenue, or the deltas between the two lines, are bigger at an earlier time (Time 1), or a later time (Time 2). While it may be tempting to say the difference is bigger at Time 1, the correct answer is Time 2. This is a visual illusion commonly referred to as the sine illusion. 
It is an underestimation of the difference between two lines when both lines have increasing slopes.","keywords":["sine illusion, gridlines, perception, bias, thresholds"],"open_access_supplemental_link":"https://osf.io/kq87n/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"http://arxiv.org/abs/2408.00854","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Dx83B4g1W5A&t=1h5m25s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1081/v-short-1081_Preview.mp4?token=YKdYyRVKX0qJ9XXs4G9WUwY_svqHkLGqf694bT-Kj5M&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1081/v-short-1081_Preview.srt?token=jRuND1m9uPluMnSvV5z9FR-91RLjE2qTAMQ5uA0zIN0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short6","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Perception and Representation","session_uid":"v-short","session_youtube_ff_id":"siQIBjM26Wg","session_youtube_ff_link":"https://youtu.be/siQIBjM26Wg","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Dx83B4g1W5A&t=1h5m25s","sessions":["Short Papers: Perception and Representation"],"time_stamp":"2024-10-17T13:33:00Z","title":"Gridlines Mitigate Sine Illusion in Line Charts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1109","abstract":"Homophily refers to the tendency of individuals to associate with others who are similar to them in characteristics, such as, race, ethnicity, age, gender, or interests. In this paper, we investigate if individuals exhibit racial homophily when viewing visualizations, using mass shooting data in the United States as the example topic. We conducted a crowdsourced experiment (N=450) where each participant was shown a visualization displaying the counts of mass shooting victims, highlighting the counts for one of three racial groups (White, Black, or Hispanic). Participants were assigned to view visualizations highlighting their own race or a different race to assess the influence of racial concordance on changes in affect (emotion) and attitude towards gun control. While we did not find evidence of homophily, the results showed a significant negative shift in affect across all visualization conditions. Notably, political ideology significantly impacted changes in affect, with more liberal views correlating with a more negative affect change. 
Our findings underscore the complexity of reactions to mass shooting visualizations and suggest that future research should consider various methodological improvements to better assess homophily effects.","accessible_pdf":false,"authors":[{"affiliations":["New York University, Brooklyn, United States"],"email":"pt2393@nyu.edu","is_corresponding":true,"name":"Poorna Talkad Sukumar"},{"affiliations":["New York University, Brooklyn, United States"],"email":"mporfiri@nyu.edu","is_corresponding":false,"name":"Maurizio Porfiri"},{"affiliations":["New York University, New York, United States"],"email":"onov@nyu.edu","is_corresponding":false,"name":"Oded Nov"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1109","image_caption":"One of the three conditions used in our experiment consisting of a bar chart of the counts of victims in mass shootings in the United States from 2013 to 2023, highlighting the counts of Hispanic victims. The other two conditions consist of the same bar chart but highlight the counts of White and Black victims, respectively. ","keywords":["Visualization; Journalism; Mass shootings; Race; Homophily"],"open_access_supplemental_link":"https://osf.io/3crqx/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/pdf/2408.03269","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Dx83B4g1W5A&t=0h36m45s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1109/v-short-1109_Preview.mp4?token=rHi1UHfRawEcazbN1rhXYIgginvswT3o2SwjyJkh0tg&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short6","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Perception and Representation","session_uid":"v-short","session_youtube_ff_id":"5MyW9ssiG3s","session_youtube_ff_link":"https://youtu.be/5MyW9ssiG3s","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Dx83B4g1W5A&t=0h36m45s","sessions":["Short Papers: Perception and Representation"],"time_stamp":"2024-10-17T13:06:00Z","title":"Connections Beyond Data: Exploring Homophily With Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1116","abstract":"Visualizations support rapid analysis of scientific datasets, allowing viewers to glean aggregate information (e.g., the mean) within split-seconds. While prior research has explored this ability in conventional charts, it is unclear if spatial visualizations used by computational scientists afford a similar ensemble perception capacity. We investigate people's ability to estimate two summary statistics, mean and variance, from pseudocolor scalar fields. In a crowdsourced experiment, we find that participants can reliably characterize both statistics, although variance discrimination requires a much stronger signal. Multi-hue and diverging colormaps outperformed monochromatic, luminance ramps in aiding this extraction. Analysis of qualitative responses suggests that participants often estimate the distribution of hotspots and valleys as visual proxies for data statistics. 
These findings suggest that people's summary interpretation of spatial datasets is likely driven by the appearance of discrete color segments, rather than assessments of overall luminance. Implicit color segmentation in quantitative displays could thus prove more useful than previously assumed by facilitating quick, gist-level judgments about color-coded visualizations.","accessible_pdf":false,"authors":[{"affiliations":["Argonne National Laboratory, Lemont, United States"],"email":"vmateevitsi@anl.gov","is_corresponding":false,"name":"Victor A. Mateevitsi"},{"affiliations":["Argonne National Laboratory, Lemont, United States","University of Illinois Chicago, Chicago, United States"],"email":"papka@anl.gov","is_corresponding":false,"name":"Michael E. Papka"},{"affiliations":["Indiana University, Indianapolis, United States"],"email":"redak@iu.edu","is_corresponding":false,"name":"Khairi Reda"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1116","image_caption":"We studied whether people can rapidly perceive two ensemble statistics from scalar fields: the mean and variation. The figure illustrates the experimental procedures we used to evaluate this capacity.","keywords":["Ensemble perception, colormaps, scalar fields"],"open_access_supplemental_link":"https://osf.io/h8mn2/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2406.14452","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Dx83B4g1W5A&t=0h10m2s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1116/v-short-1116_Preview.mp4?token=-2B8GvKPTbcGmjcSkOpRvnNnMWzdgzCHMnvQ4MO_vKg&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1116/v-short-1116_Preview.srt?token=k8x_VR5-N0DudnoKoiYijSup6XtSz5uG2wKefJzoKC4&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short6","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Perception and Representation","session_uid":"v-short","session_youtube_ff_id":"lIUx96SZ0N4","session_youtube_ff_link":"https://youtu.be/lIUx96SZ0N4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Dx83B4g1W5A&t=0h10m2s","sessions":["Short Papers: Perception and Representation"],"time_stamp":"2024-10-17T12:39:00Z","title":"Science in a Blink: Supporting Ensemble Perception in Scalar Fields","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1184","abstract":"To improve the perception of hierarchical structures in data sets, several color map generation algorithms have been proposed to take this structure into account. But the design of hierarchical color maps elicits different requirements to those of color maps for tabular data. Within this paper, we make an initial effort to put design rules from the color map literature into the context of hierarchical color maps. We investigate the impact of several design decisions and provide recommendations for various analysis scenarios. 
Thus, we lay the foundation for objective quality criteria to evaluate hierarchical color maps.","accessible_pdf":true,"authors":[{"affiliations":["Fraunhofer IGD, Darmstadt, Germany"],"email":"tobias.mertz@igd.fraunhofer.de","is_corresponding":true,"name":"Tobias Mertz"},{"affiliations":["Fraunhofer IGD, Darmstadt, Germany","TU Darmstadt, Darmstadt, Germany"],"email":"joern.kohlhammer@igd.fraunhofer.de","is_corresponding":false,"name":"J\u00f6rn Kohlhammer"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1184","image_caption":"The results of three different configurations of the popular Tree Colors algorithm for generating hierarchical color maps. The configurations produce color maps with different characteristics that are suitable for different analysis scenarios. Within this paper, we investigate the impact of six different design rules on hierarchical color map design in different analysis scenarios, to be able to decide which configuration suits our scenarios best.","keywords":["Guidelines, Color, Graph/Network and Tree Data."],"open_access_supplemental_link":"https://arxiv.org/abs/2407.08287","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2407.08287","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Dx83B4g1W5A&t=0h19m0s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1184/v-short-1184_Preview.mp4?token=bLQpPzptN0CYuUR8xEdKMS11SJ_f0XXAboOPiNdryYE&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short6","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Perception and Representation","session_uid":"v-short","session_youtube_ff_id":"jtKTnjVQ_wQ","session_youtube_ff_link":"https://youtu.be/jtKTnjVQ_wQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Dx83B4g1W5A&t=0h19m0s","sessions":["Short Papers: Perception and Representation"],"time_stamp":"2024-10-17T12:48:00Z","title":"Towards a Quality Approach to Hierarchical Color Maps","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1274","abstract":"This study examines the impact of positive and negative contrast polarities (i.e., light and dark modes) on the performance of younger adults and people in their late adulthood (PLA). In a crowdsourced study with 134 participants (69 below age 60, 66 aged 60 and above), we assessed their accuracy and time performing analysis tasks across three common visualization types (Bar, Line, Scatterplot) and two contrast polarities (positive and negative). We observed that, across both age groups, the polarity that led to better performance and the resulting amount of improvement varied on an individual basis, with each polarity benefiting comparable proportions of participants. However, the contrast polarity that led to better performance did not always match their preferred polarity. Additionally, we observed that the choice of contrast polarity can have an impact on time similar to that of the choice of visualization type, resulting in an average percent difference of around 36%. 
These findings indicate that, overall, the effects of contrast polarity on visual analysis performance do not noticeably change with age. Furthermore, they underscore the importance of making visualizations available in both contrast polarities to better-support a broad audience with differing needs. Supplementary materials for this work can be found at https://osf.io/539a4/.","accessible_pdf":false,"authors":[{"affiliations":["University of Massachusetts Amherst, Amherst, United States"],"email":"zwhile@cs.umass.edu","is_corresponding":true,"name":"Zack While"},{"affiliations":["University of Massachusetts Amherst, Amherst, United States"],"email":"asarv@cs.umass.edu","is_corresponding":false,"name":"Ali Sarvghad"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1274","image_caption":"Two rows of data visualizations, each row consisting of 3 visualizations: a scatterplot, bar chart, and line chart, respectively. The top row uses positive contrast, also known as light mode, while the bottom row uses negative contrast, also known as dark mode.","keywords":["people in late adulthood, GerontoVis, data visualization, contrast polarity"],"open_access_supplemental_link":"https://osf.io/539a4","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Dx83B4g1W5A&t=0h0m35s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1274/v-short-1274_Preview.mp4?token=oJ4pXma3alci88ApsX1vKqwlwbRciFJVinzLFrhA09I&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1274/v-short-1274_Preview.srt?token=9gTOBheCSX7NlhM3wl2VgFcitssodWZdQxSagZEIqXQ&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short6","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Perception and Representation","session_uid":"v-short","session_youtube_ff_id":"--dzVG5Ti8w","session_youtube_ff_link":"https://youtu.be/--dzVG5Ti8w","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Dx83B4g1W5A&t=0h0m35s","sessions":["Short Papers: Perception and Representation"],"time_stamp":"2024-10-17T12:30:00Z","title":"Dark Mode or Light Mode? Exploring the Impact of Contrast Polarity on Visualization Performance Between Age Groups","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1301","abstract":"\"Reactionary delay\" is a result of the accumulated cascading effects of knock-on train delays which is increasing on UK railways due to increasing utilisation of the railway infrastructure. The chaotic nature of its effects on train lateness is notoriously hard to predict. We use a stochastic Monte-Carto-style simulation of reactionary delay that produces whole distributions of likely reactionary delay and delays this causes. 
We demonstrate how Zoomable Level-of-Detail ChartTables - case-by-variable tables where cases are rows, variables are columns, variables are complex composite metrics that incorporate distributions, and cells contain mini-charts that depict these as different levels of detail through zoom interaction - help interpret whole distributions of model outputs to help understand the causes and effects of reactionary delay, how they inform timetable robustness testing, and how they could be used in other contexts.","accessible_pdf":false,"authors":[{"affiliations":["City, University of London, London, United Kingdom"],"email":"a.slingsby@city.ac.uk","is_corresponding":true,"name":"Aidan Slingsby"},{"affiliations":["Risk Solutions, Warrington, United Kingdom"],"email":"jonathan.hyde@risksol.co.uk","is_corresponding":false,"name":"Jonathan Hyde"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1301","image_caption":"A Zoomable Level-of-Detail ChartTable, in which train delay metrics (columns) are represented as mini-charts for each train (row).","keywords":["Level-of-detail, mini-charts, distributions, stochastic modelling."],"open_access_supplemental_link":"https://osf.io/u2ykd/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"http://arxiv.org/abs/2408.01203","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/Dx83B4g1W5A&t=0h55m10s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1301/v-short-1301_Preview.mp4?token=BMyADGaR-JCzamFZSa7_nHAFd0sZ2xMO1MoowOJ3Ui8&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short6","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Perception and Representation","session_uid":"v-short","session_youtube_ff_id":"oBxNVn63rEM","session_youtube_ff_link":"https://youtu.be/oBxNVn63rEM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/Dx83B4g1W5A&t=0h55m10s","sessions":["Short Papers: Perception and Representation"],"time_stamp":"2024-10-17T13:24:00Z","title":"Zoomable Level-of-Detail ChartTables for Interpreting Probabilistic Model Outputs for Reactionary Train Delays","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1062","abstract":"Annotations are a critical component of visualizations, helping viewers interpret the visual representation and highlighting critical data insights. Despite their significant role, we lack an understand- ing of how annotations can be incorporated into other data representations, such as physicalizations and sonifications. Given the emergent nature of these representations, sonifications, and physicalizations lack formalized conventions (e.g., design space, vocabulary) that can introduce challenges for audiences to interpret the intended data encoding. To address this challenge, this work focuses on how annotations can be more tightly integrated into the design process of creating sonifications and physicalizations. In an exploratory study with 13 designers, we explore how visualization annotation techniques can be adapted to sonic and physical modalities. 
Our work highlights how annotations for sonification and physicalizations are inseparable from their data encodings.","accessible_pdf":false,"authors":[{"affiliations":["Whitman College, Walla Walla, United States"],"email":"sorensor@whitman.edu","is_corresponding":false,"name":"Rhys Sorenson-Graff"},{"affiliations":["University of Colorado Boulder, Boulder, United States"],"email":"sandra.bae@colorado.edu","is_corresponding":false,"name":"S. Sandra Bae"},{"affiliations":["Whitman College, Walla Walla, United States"],"email":"wirfsbro@colorado.edu","is_corresponding":true,"name":"Jordan Wirfs-Brock"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1062","image_caption":"Examples of geometric annotations used in a visualization, sonification, and physicalization. Geometric annotations draw attention to a specific section of the data representation, providing additional context, detail, and clarity to a section if it contains crucial information or is of significant interest to the viewer. Visualizations can integrate geometric annotations with call-out boxes. Sonifications can highlight specific excerpts using sub-clips of audio. Physicalizations can present multiple frames of reference to emphasize different perspectives that zoom in and out of the physicalization (photo credit to Klauss et al.)","keywords":["Annotations, physicalization, sonification"],"open_access_supplemental_link":"https://osf.io/wu6g9/","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.04574","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/VJvUj8FNBsU&t=0h47m56s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1062/v-short-1062_Preview.mp4?token=fZ4-LL2x7Xo3IEq6PbGThmlQ6hE3if6_RtUivcv_1qc&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short7","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Text and Multimedia","session_uid":"v-short","session_youtube_ff_id":"ANwzcGZYe8E","session_youtube_ff_link":"https://youtu.be/ANwzcGZYe8E","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/VJvUj8FNBsU&t=0h47m56s","sessions":["Short Papers: Text and Multimedia"],"time_stamp":"2024-10-17T15:00:00Z","title":"Integrating Annotations into the Design Process for Sonifications and Physicalizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1068","abstract":"Integrating textual content, such as titles, annotations, and captions, with visualizations facilitates comprehension and takeaways during data exploration. Yet current tools often lack mechanisms for integrating meaningful long-form prose with visual data. This paper introduces DASH, a bimodal data exploration tool that supports integrating semantic levels into the interactive process of visualization and text-based analysis. DASH operationalizes a modified version of Lundgard et al.\u2019s semantic hierarchy model that categorizes data descriptions into four levels ranging from basic encodings to high-level insights. 
By leveraging this structured semantic level framework and a large language model\u2019s text generation capabilities, DASH enables the creation of data-driven narratives via drag-and-drop user interaction. Through a preliminary user evaluation, we discuss the utility of DASH\u2019s text and chart integration capabilities when participants perform data exploration with the tool.","accessible_pdf":true,"authors":[{"affiliations":["Tableau Research, Seattle, United States"],"email":"bromley.denny@gmail.com","is_corresponding":true,"name":"Dennis Bromley"},{"affiliations":["Tableau Research, Palo Alto, United States"],"email":"vsetlur@tableau.com","is_corresponding":false,"name":"Vidya Setlur"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1068","image_caption":"DASH is an interactive bimodal data analysis system that facilitates drag-and-drop analysis between text and visual representations of data. Users can expand on chart marks or text phrases by dragging them to DASH\u2019s text region, or drill down into them by dragging them to DASH\u2019s chart region. Using a modified Lundgard et al semantic hierarchy, DASH helps users create data analyses that combine high-level insights with low-level supporting visualizations.","keywords":["Semantic levels, LLMs, text generation"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.01011","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/VJvUj8FNBsU&t=0h9m45s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1068/v-short-1068_Preview.mp4?token=ofxmxp8U-50dbgsNLkj35JCykoRs7C8S6IINtrBo8p4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1068/v-short-1068_Preview.srt?token=HsSn0QkGmegjSHB8PhO6Wjuk5OUpxFRfasDCsQLE7Ww&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short7","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Text and Multimedia","session_uid":"v-short","session_youtube_ff_id":"3Jlkw_OKzlE","session_youtube_ff_link":"https://youtu.be/3Jlkw_OKzlE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/VJvUj8FNBsU&t=0h9m45s","sessions":["Short Papers: Text and Multimedia"],"time_stamp":"2024-10-17T14:24:00Z","title":"DASH: A Bimodal Data Exploration Tool for Interactive Text and Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1078","abstract":"Data visualizations are reaching global audiences. As people who use Right-to-left (RTL) scripts constitute over a billion potential data visualization users, a need emerges to investigate how visualizations are communicated to them. Web design guidelines exist to assist designers in adapting different reading directions, yet we lack a similar standard for visualization design. This paper investigates the design patterns of visualizations with RTL scripts. We collected 128 visualizations from data-driven articles published in Arabic news outlets and analyzed their chart composition, textual elements, and sources. Our analysis suggests that designers tend to apply RTL approaches more frequently for categorical data. 
In other situations, we observed a mix of Left-to-right (LTR) and RTL approaches for chart directions and structures, sometimes inconsistently utilized within the same article. We reflect on this lack of clear guidelines for RTL data visualizations and derive implications for visualization authoring tools and future research directions.","accessible_pdf":true,"authors":[{"affiliations":["University College London, London, United Kingdom","UAE University , Al Ain, United Arab Emirates"],"email":"muna.alebri.19@ucl.ac.uk","is_corresponding":true,"name":"Muna Alebri"},{"affiliations":["Worcester Polytechnic Institute, Worcester, United States"],"email":"ntrakotondravony@wpi.edu","is_corresponding":false,"name":"No\u00eblle Rakotondravony"},{"affiliations":["Worcester Polytechnic Institute, Worcester, United States"],"email":"ltharrison@wpi.edu","is_corresponding":false,"name":"Lane Harrison"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1078","image_caption":"Data visualizations from two articles available in Arabic and other left-to-right languages. The bar chart shows categorical data points that are non-ordinal (source: Inkyfada). The line chart shows ordered data points, its x-axis represents time sequence. Both charts are mirrored and their orientation follows the direction of the article language, i.e. from right to left for Arabic and left to right for English. The position of the logo of the journal, and the mention of the data source are also mirrored when switching between visualization in RTL and LTR languages.","keywords":["Design Patterns, Right-To-Left Visualizations, Data Journalism"],"open_access_supplemental_link":"https://rdr.ucl.ac.uk/articles/dataset/Code_book_of_RTL_visualization_in_Arabic_News_media/26150749/1","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://discovery.ucl.ac.uk/id/eprint/10194127/","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/VJvUj8FNBsU&t=0h0m40s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1078/v-short-1078_Preview.mp4?token=CnVCQt3ox-J30yoeusQPdACfd-PZQs0bBldoBUnHbqM&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1078/v-short-1078_Preview.srt?token=U7RTz016QX5hdE33XJ4y3nco8gC6-cpdsMTjLcL7ziY&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short7","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Text and Multimedia","session_uid":"v-short","session_youtube_ff_id":"87XnPiyYb1U","session_youtube_ff_link":"https://youtu.be/87XnPiyYb1U","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/VJvUj8FNBsU&t=0h0m40s","sessions":["Short Papers: Text and Multimedia"],"time_stamp":"2024-10-17T14:15:00Z","title":"Design Patterns in Right-to-Left Visualizations: The Case of Arabic Content","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1079","abstract":"Image datasets serve as the foundation for machine learning models in computer vision, significantly influencing model capabilities, performance, and biases alongside architectural considerations. 
Therefore, understanding the composition and distribution of these datasets has become increasingly crucial. To address the need for intuitive exploration of these datasets, we propose AEye, an extensible and scalable visualization tool tailored to image datasets. AEye utilizes a contrastively trained model to embed images into semantically meaningful high-dimensional representations, facilitating data clustering and organization. To visualize the high-dimensional representations, we project them onto a two-dimensional plane and arrange images in layers so users can seamlessly navigate and explore them interactively. AEye facilitates semantic search functionalities for both text and image queries, enabling users to search for content. We open-source the codebase for AEye, and provide a simple configuration to add datasets. ","accessible_pdf":true,"authors":[{"affiliations":["ETH Zurich, Zurich, Switzerland"],"email":"fgroetschla@ethz.ch","is_corresponding":true,"name":"Florian Gr\u00f6tschla"},{"affiliations":["ETH Zurich, Zurich, Switzerland"],"email":"lanzendoerfer@ethz.ch","is_corresponding":false,"name":"Luca A Lanzend\u00f6rfer"},{"affiliations":["ETH Zurich, Zurich, Switzerland"],"email":"mcalzavara@student.ethz.ch","is_corresponding":false,"name":"Marco Calzavara"},{"affiliations":["ETH Zurich, Zurich, Switzerland"],"email":"wattenhofer@ethz.ch","is_corresponding":false,"name":"Roger Wattenhofer"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1079","image_caption":"Overview of the AEye interface. Images are positioned according to their location in the CLIP embedding space and arranged in layers that the user can navigate by zooming. Top left: Dataset selector, Top middle: Search bar for semantic text and image search. Top right: Show information about the application. Bottom right: Minimap of the embedding space. 
","keywords":["Image embeddings, image visualization, contrastive learning, semantic search."],"open_access_supplemental_link":"https://github.com/ETH-DISCO/aeye","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.04072","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/VJvUj8FNBsU&t=0h56m23s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1079/v-short-1079_Preview.mp4?token=n3u7b786e-vMYMvE_IQ5U9cjS2frtaZk02wVGgfo0KE&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1079/v-short-1079_Preview.srt?token=hFQcl6SIBRsQTGVASl2lxRII2B4p0D9G-Bu6d5HgxoI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short7","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Text and Multimedia","session_uid":"v-short","session_youtube_ff_id":"JdTXigyYkkw","session_youtube_ff_link":"https://youtu.be/JdTXigyYkkw","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/VJvUj8FNBsU&t=0h56m23s","sessions":["Short Papers: Text and Multimedia"],"time_stamp":"2024-10-17T15:09:00Z","title":"AEye: A Visualization Tool for Image Datasets","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1100","abstract":"Confidence scores of automatic speech recognition (ASR) outputs are often inadequately communicated, preventing its seamless integration into analytical workflows. In this paper, we introduce Confides, a visual analytic system developed in collaboration with intelligence analysts to address this issue. Confides aims to aid exploration and post-AI-transcription editing by visually representing the confidence associated with the transcription. We demonstrate how our tool can assist intelligence analysts who use ASR outputs in their analytical and exploratory tasks and how it can help mitigate misinterpretation of crucial information. We also discuss opportunities for improving textual data cleaning and model transparency for human-machine collaboration.","accessible_pdf":true,"authors":[{"affiliations":["Washington University in St. Louis, St. Louis, United States"],"email":"sha@wustl.edu","is_corresponding":true,"name":"Sunwoo Ha"},{"affiliations":["Washington University in St. Louis, St. Louis, United States"],"email":"chaelim@wustl.edu","is_corresponding":false,"name":"Chaehun Lim"},{"affiliations":["Smith College, Northampton, United States"],"email":"jcrouser@smith.edu","is_corresponding":false,"name":"R. Jordan Crouser"},{"affiliations":["Washington University in St. Louis, St. Louis, United States"],"email":"alvitta@wustl.edu","is_corresponding":false,"name":"Alvitta Ottley"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1100","image_caption":"Overview of Confides: (a) The collapsible side menu contains controls for selecting, uploading, and transcribing audio files. (b) At the top of the dashboard are the audio player and search bar. (c) The confidence overview displays the length and average confidence value of each line segment in the transcription (encoded by the width and opacity of each rectangle, respectively). 
(d) The word tree provides context to a specific search term and shows which words most often follow or precede it. (e) The user can view and edit the transcription; each word is underlined, and its opacity indicates the confidence score.","keywords":["Visual analytics, confidence visualization, automatic speech recognition"],"open_access_supplemental_link":"https://github.com/washuvis/vis2024confides","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2405.00223","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/VJvUj8FNBsU&t=0h39m10s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1100/v-short-1100_Preview.mp4?token=MEgqXbErF2uaQUneqbipGJAuaMBY8pOPLCDzbdEDAtU&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short7","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Text and Multimedia","session_uid":"v-short","session_youtube_ff_id":"tBOVI_-pLQ4","session_youtube_ff_link":"https://youtu.be/tBOVI_-pLQ4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/VJvUj8FNBsU&t=0h39m10s","sessions":["Short Papers: Text and Multimedia"],"time_stamp":"2024-10-17T14:51:00Z","title":"Confides: A Visual Analytics Solution for Automated Speech Recognition Analysis and Exploration","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1144","abstract":"Reconstruction of 3D scenes from 2D images is a technical challenge that impacts domains from Earth and planetary sciences and space exploration to augmented and virtual reality. Typically, reconstruction algorithms first identify common features across images and then minimize reconstruction errors after estimating the shape of the terrain. This bundle adjustment (BA) step optimizes around a single, simplifying scalar value that obfuscates many possible causes of reconstruction errors (e.g., initial estimate of the position and orientation of the camera, lighting conditions, ease of feature detection in the terrain). Reconstruction errors can lead to inaccurate scientific inferences or endanger a spacecraft exploring a remote environment. To address this challenge, we present VECTOR, a visual analysis tool that improves error inspection for stereo reconstruction BA. VECTOR provides analysts with previously unavailable visibility into feature locations, camera pose, and computed 3D points. VECTOR was developed in partnership with the Perseverance Mars Rover and Ingenuity Mars Helicopter terrain reconstruction team at the NASA Jet Propulsion Laboratory. 
We report on how this tool was used to debug and improve terrain reconstruction for the Mars 2020 mission.","accessible_pdf":false,"authors":[{"affiliations":["Northeastern University, Boston, United States"],"email":"racquel.fygenson@gmail.com","is_corresponding":true,"name":"Racquel Fygenson"},{"affiliations":["Weta FX, Auckland, New Zealand"],"email":"kjawad@andrew.cmu.edu","is_corresponding":false,"name":"Kazi Jawad"},{"affiliations":["Art Center, Pasadena, United States"],"email":"zongzhanisabelli@gmail.com","is_corresponding":false,"name":"Zongzhan Li"},{"affiliations":["California Institute of Technology, Pasadena, United States"],"email":"francois.ayoub@jpl.nasa.gov","is_corresponding":false,"name":"Francois Ayoub"},{"affiliations":["California Institute of Technology, Pasadena, United States"],"email":"bob.deen@jpl.nasa.gov","is_corresponding":false,"name":"Robert G Deen"},{"affiliations":["California Institute of Technology, Pasadena, United States"],"email":"sd@scottdavidoff.com","is_corresponding":false,"name":"Scott Davidoff"},{"affiliations":["Carnegie Mellon University, Pittsburgh, United States"],"email":"domoritz@cmu.edu","is_corresponding":false,"name":"Dominik Moritz"},{"affiliations":["NASA-JPL, Pasadena, United States"],"email":"mauricio.a.hess.flores@jpl.nasa.gov","is_corresponding":false,"name":"Mauricio Hess-Flores"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1144","image_caption":"We present VECTOR, software that visualizes 3D reconstruction error for easier comprehension and more informed input modification. VECTOR consists of image views that superimpose residual error vectors on top of input images and 3-dimensional camera views that show spatially how multiple images are calibrated by a reconstruction algorithm to render a 3D output.","keywords":["Computer vision, stereo image processing, optimization, error analysis, uncertainty, SLAM, SfM, robotics"],"open_access_supplemental_link":"https://github.com/NASA-AMMOS/VECTOR","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.03503","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/VJvUj8FNBsU&t=1h5m7s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1144/v-short-1144_Preview.mp4?token=YSmFa8I-T48gXtFd-NX4Gy8_zyKOP8lr0uTaoqgXJNw&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1144/v-short-1144_Preview.srt?token=D8vJ9rJe-6EA1yRWkHrW_f_MVYtiaPG9wnNc3zKr4F0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short7","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Text and Multimedia","session_uid":"v-short","session_youtube_ff_id":"M97VBVFg46E","session_youtube_ff_link":"https://youtu.be/M97VBVFg46E","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/VJvUj8FNBsU&t=1h5m7s","sessions":["Short Papers: Text and Multimedia"],"time_stamp":"2024-10-17T15:18:00Z","title":"Opening the Black Box of 3D Reconstruction Error Analysis with VECTOR","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1236","abstract":"Automatically generating data visualizations in response to human utterances on datasets necessitates a 
deep semantic understanding of the utterance, including implicit and explicit references to data attributes, visualization tasks, and necessary data preparation steps. Natural Language Interfaces (NLIs) for data visualization have explored ways to infer such information, yet challenges persist due to inherent uncertainty in human speech. Recent advances in Large Language Models (LLMs) provide an avenue to address these challenges, but their ability to extract the relevant semantic information remains unexplored. In this study, we evaluate four publicly available LLMs (GPT-4, Gemini-Pro, Llama3, and Mixtral), investigating their ability to comprehend utterances even in the presence of uncertainty and identify the relevant data context and visual tasks. Our findings reveal that LLMs are sensitive to uncertainties in utterances. Despite this sensitivity, they are able to extract the relevant data context. However, LLMs struggle with inferring visualization tasks. Based on these results, we highlight future research directions on using LLMs for visualization generation. Our supplementary materials have been shared on GitHub: https://github.com/hdi-umd/Semantic_Profiling_LLM_Evaluation.","accessible_pdf":true,"authors":[{"affiliations":["University of Maryland, College Park, United States"],"email":"hbako@umd.edu","is_corresponding":true,"name":"Hannah K. Bako"},{"affiliations":["University of Maryland, College Park, United States"],"email":"arshnoorbhutani8@gmail.com","is_corresponding":false,"name":"Arshnoor Bhutani"},{"affiliations":["The University of Texas at Austin, Austin, United States"],"email":"xinyi.liu@utexas.edu","is_corresponding":false,"name":"Xinyi Liu"},{"affiliations":["University of Maryland, College Park, United States"],"email":"kcobbina@cs.umd.edu","is_corresponding":false,"name":"Kwesi Adu Cobbina"},{"affiliations":["University of Maryland, College Park, United States"],"email":"leozcliu@umd.edu","is_corresponding":false,"name":"Zhicheng Liu"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1236","image_caption":"The image presents a study evaluating the semantic profiling abilities of large language models (LLMs) for natural language utterances in data visualization tasks, analyzing clarity, data context extraction, and task classification across 500 utterances and 37 datasets.","keywords":["Human-centered computing\u2014Visualization\u2014Empirical studies in visualization;"],"open_access_supplemental_link":"https://github.com/hdi-umd/Semantic_Profiling_LLM_Evaluation/","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2407.06129","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/VJvUj8FNBsU&t=0h19m26s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1236/v-short-1236_Preview.mp4?token=I3UvrII-G1uCgR91evMvz1wVxYcuroghU4YBe1mKrSc&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1236/v-short-1236_Preview.srt?token=dfnnBvj8b0vGXpsosgMgxryg0CSmgulBZEyMgaOeWyI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short7","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Text and 
Multimedia","session_uid":"v-short","session_youtube_ff_id":"hZQ9TFfCsvM","session_youtube_ff_link":"https://youtu.be/hZQ9TFfCsvM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/VJvUj8FNBsU&t=0h19m26s","sessions":["Short Papers: Text and Multimedia"],"time_stamp":"2024-10-17T14:33:00Z","title":"Evaluating the Semantic Profiling Abilities of LLMs for Natural Language Utterances in Data Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1276","abstract":"Machine Learning models for chart-grounded Q&A (CQA) often treat charts as images, but performing CQA on pixel values has proven challenging. We thus investigate a resource overlooked by current ML-based approaches: the declarative documents describing how charts should visually encode data (i.e., chart specifications). In this work, we use chart specifications to enhance language models (LMs) for chart-reading tasks, such that the resulting system can robustly understand language for CQA. Through a case study with 359 bar charts, we test novel fine tuning schemes on both GPT-3 and T5 using a new dataset curated for two CQA tasks: question-answering and visual explanation generation. Our text-only approaches strongly outperform vision-based GPT-4 on explanation generation (99% vs. 63% accuracy), and show promising results for question-answering (57-67% accuracy). Through in-depth experiments, we also show that our text-only approaches are mostly robust to natural language variation.","accessible_pdf":true,"authors":[{"affiliations":["Adobe Research, San Jose, United States"],"email":"victorbursztyn2022@u.northwestern.edu","is_corresponding":false,"name":"Victor S. Bursztyn"},{"affiliations":["Adobe Research, Seattle, United States"],"email":"jhoffs@adobe.com","is_corresponding":true,"name":"Jane Hoffswell"},{"affiliations":["Adobe Research, San Jose, United States"],"email":"sguo@adobe.com","is_corresponding":false,"name":"Shunan Guo"},{"affiliations":["Adobe Research, San Jose, United States"],"email":"eunyee@adobe.com","is_corresponding":false,"name":"Eunyee Koh"}],"award":"","doi":"","event_id":"v-short","event_title":"VIS Short Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1276","image_caption":"We explore two main tasks related to chart-grounded Q&A: question answering (QA) and visual explanation generation (VEG). QA leverages templated domain facts (DF) from the chart's CSV file, whereas VEG relies on visual context (VC) from its JSON file. In the first fine-tuning step, the charts' underlying text files are injected into the language models (LMs). We then fine-tune the QA and VEG steps on 90% of the charts, with 10% held out for testing during our evaluation in \u00a74. 
To understand the robustness of our LMs to natural language variation, we also perform a question paraphrasing task to rephrase our template-generated questions more naturally.","keywords":["Machine Learning Techniques; Charts, Diagrams, and Plots; Datasets; Computational Benchmark Studies"],"open_access_supplemental_link":"https://github.com/vbursztyn/charts-as-text-for-chartqa","open_access_supplemental_question":"Yes, both PCS and external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/VJvUj8FNBsU&t=0h30m12s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1276/v-short-1276_Preview.mp4?token=uk4Bi8iMPydutIRUB_RlyQm3_UIvGt5EwfIIOdsPMtI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1276/v-short-1276_Preview.srt?token=KgNukVsaW0mEY5605DEVIRE1wJQblZjvqbDrrUKxGBk&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"short7","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Short Papers: Text and Multimedia","session_uid":"v-short","session_youtube_ff_id":"m9owYC9e3PU","session_youtube_ff_link":"https://youtu.be/m9owYC9e3PU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/VJvUj8FNBsU&t=0h30m12s","sessions":["Short Papers: Text and Multimedia"],"time_stamp":"2024-10-17T14:42:00Z","title":"Representing Charts as Text for Language Models: An In-Depth Study of Question Answering for Bar Charts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1363","abstract":"Data visualization aids in making data analysis more intuitive and in-depth, with widespread applications in fields such as biology, finance, and medicine. For massive and continuously growing streaming time series data, these data are typically visualized in the form of line charts, but the data transmission puts significant pressure on the network, leading to visualization lag or even failure to render completely. This paper proposes a universal sampling algorithm FPCS, which retains feature points from continuously received streaming time series data, compensates for the frequent fluctuating feature points, and aims to achieve efficient visualization. This algorithm bridges the gap in sampling for streaming time series data. 
The algorithm has several advantages: (1) It optimizes the sampling results by compensating for fewer feature points, retaining the visualization features of the original data very well, ensuring high-quality sampled data; (2) The execution time is the shortest compared to similar existing algorithms; (3) It has an almost negligible space overhead; (4) The data sampling process does not depend on the overall data; (5) This algorithm can be applied to infinite streaming data and finite static data.","accessible_pdf":false,"authors":[{"affiliations":["China Nanhu Academy of Electronics and Information Technology(CNAEIT), JiaXing, China"],"email":"3271961659@qq.com","is_corresponding":true,"name":"Hongyan Li"},{"affiliations":["China Nanhu Academy of Electronics and Information Technology(CNAEIT), JiaXing, China"],"email":"ustcboy@outlook.com","is_corresponding":false,"name":"Bo Yang"},{"affiliations":["China Nanhu Academy of Electronics and Information Technology, Jiaxing, China"],"email":"caiyansong@cnaeit.com","is_corresponding":false,"name":"Yansong Chua"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1363","image_caption":"The FPCS algorithm is used to sample streaming time series data. Each row corresponds to one of the five typical datasets. Columns 1-4 represent the visualization fitting effect of the first 100,000 data points in these datasets using the newly proposed FPCS and the other three algorithms, based on a 100:1 sampling ratio. The red line represents original data points; the green line represents sampled data points. Column 5 uses SSIM to compare the sampling effects of the four algorithms based on sampling ratios of 100:1, 200:1, 500:1, and 1000:1. The FPCS algorithm shows the best sampling results and performance.","keywords":["Data visualization, Massive, Streaming, Time series, Line charts, Sampling, Feature, Compensating"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/KgA-HGs0_4s&t=0h0m34s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1363/v-full-1363_Preview.mp4?token=0gXWRmcCMfl6L3ip3HixMsJXLSVigQa_bEzROzFp0No&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1363/v-full-1363_Preview.srt?token=EazpHxQWh4ZjsjNz-jgkfAVPiCtu1Z_KAYxozQRJBp8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"virtual1","session_room":"Palma Ceia I ","session_room_id":"palmaceia1","session_title":"Virtual: VIS from around the world","session_uid":"v-full","session_youtube_ff_id":"TAD1E6fAMHU","session_youtube_ff_link":"https://youtu.be/TAD1E6fAMHU","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KgA-HGs0_4s&t=0h0m34s","sessions":["Virtual: VIS from around the world"],"time_stamp":"2024-10-16T12:30:00Z","title":"FPCS: Feature Preserving Compensated Sampling of Streaming Time Series Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-full-1708","abstract":"The widespread use of Deep Neural Networks (DNNs) has recently resulted in their application to challenging scientific visualization tasks. 
While advanced DNNs demonstrate impressive generalization abilities, understanding factors like prediction quality, confidence, robustness, and uncertainty is crucial. These insights aid application scientists in making informed decisions. However, DNNs lack inherent mechanisms to measure prediction uncertainty, prompting the creation of distinct frameworks for constructing robust uncertainty-aware models tailored to various visualization tasks. In this work, we develop uncertainty-aware implicit neural representations to model steady-state vector fields effectively. We comprehensively evaluate the efficacy of two principled deep uncertainty estimation techniques: (1) Deep Ensemble and (2) Monte Carlo Dropout, aimed at enabling uncertainty-informed visual analysis of features within steady vector field data. Our detailed exploration using several vector data sets indicate that uncertainty-aware models generate informative visualization results of vector field features. Furthermore, incorporating prediction uncertainty improves the resilience and interpretability of our DNN model, rendering it applicable for the analysis of non-trivial vector field data sets.","accessible_pdf":true,"authors":[{"affiliations":["Indian Institute of Technology Kanpur , Kanpur, India"],"email":"atulkrfcb@gmail.com","is_corresponding":true,"name":"Atul Kumar"},{"affiliations":["Indian Institute of Technology Kanpur , Kanpur , India"],"email":"gsiddharth2209@gmail.com","is_corresponding":false,"name":"Siddharth Garg"},{"affiliations":["Indian Institute of Technology Kanpur (IIT Kanpur), Kanpur, India"],"email":"soumya.cvpr@gmail.com","is_corresponding":false,"name":"Soumya Dutta"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-full-1708","image_caption":"Uncertainty-aware implicit neural representation learning of vector field data. 
This proposed method enables neural network-guided uncertainty-informed visual analytics of vector fields by estimating the prediction uncertainty associated with the predicted values, aiming to build trustworthy and robust neural representations of complex vector data.","keywords":["Implicit Neural Network, Uncertainty, Monte Carlo Dropout, Deep Ensemble, Vector Field, Visualization, Deep Learning."],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"full","paper_type_color":"#1C3160","paper_type_name":"VIS Full Paper","preprint_link":"https://doi.org/10.48550/arXiv.2407.16119","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/KgA-HGs0_4s&t=0h11m49s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1708/v-full-1708_Preview.mp4?token=RuwNr0_Oj-7iuGFhpcoszwyfx6b8OX43V7VsYW4Jp-I&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-full/v-full-1708/v-full-1708_Preview.srt?token=4tcbOL6zqtOUnLdOq4amQGeiATXilSco48Y5QL9epKM&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"virtual1","session_room":"Palma Ceia I ","session_room_id":"palmaceia1","session_title":"Virtual: VIS from around the world","session_uid":"v-full","session_youtube_ff_id":"vEf-mNcR5M0","session_youtube_ff_link":"https://youtu.be/vEf-mNcR5M0","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KgA-HGs0_4s&t=0h11m49s","sessions":["Virtual: VIS from around the world"],"time_stamp":"2024-10-16T12:42:00Z","title":"Uncertainty-Aware Deep Neural Representations for Visual Analysis of Vector Field Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1101","abstract":"Color coding, a technique assigning specific colors to cluster information types, has proven advantages in aiding human cognitive activities, especially reading and comprehension. The rise of Large Language Models (LLMs) has streamlined document coding, enabling simple automatic text labeling with various schemes. This has the potential to make color-coding more accessible and benefit more users. However, the impact of color choice on information seeking is understudied. We conducted a user study assessing various color schemes\u2019 effectiveness in LLM-coded text documents, standardizing contrast ratios to approximately 5.55:1 across schemes. Participants performed timed information-seeking tasks in color-coded scholarly abstracts. Results showed non-analogous and yellow-inclusive color schemes improved performance, with the latter also being more preferred by participants. These findings can inform better color scheme choices for text annotation. 
As LLMs advance document coding, we advocate for more research focusing on the \u201ccolor\u201d aspect of color-coding techniques.","accessible_pdf":false,"authors":[{"affiliations":["Pennsylvania State University, University Park, United States"],"email":"samnghoyin@gmail.com","is_corresponding":true,"name":"Ho Yin Ng"},{"affiliations":["Pennsylvania State University, University Park, United States"],"email":"zmh5268@psu.edu","is_corresponding":false,"name":"Zeyu He"},{"affiliations":["Pennsylvania State University, University Park , United States"],"email":"txh710@psu.edu","is_corresponding":false,"name":"Ting-Hao Kenneth Huang"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1101","image_caption":"The left figure shows the 10 color schemes used in our user study, generated by combining cool (Red, Yellow) and warm (Green, Blue) colors as base colors. These schemes are categorized into groups for analysis. The right figure shows the study result that yellow-inclusive schemes are more effective for information seeking tasks, yielding higher accuracy and lower response times compared to other color schemes.","keywords":["Color, Color coding, Information seeking, Text visualization, Document."],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/KgA-HGs0_4s&t=0h24m23s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1101/v-short-1101_Preview.mp4?token=klJe8UgVF0yzVDoDzhM_SYu91sIQDbFcUnsgTB_t7rU&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"virtual1","session_room":"Palma Ceia I ","session_room_id":"palmaceia1","session_title":"Virtual: VIS from around the world","session_uid":"v-short","session_youtube_ff_id":"NbqkrDofSUs","session_youtube_ff_link":"https://youtu.be/NbqkrDofSUs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KgA-HGs0_4s&t=0h24m23s","sessions":["Virtual: VIS from around the world"],"time_stamp":"2024-10-16T12:54:00Z","title":"What Color Scheme is More Effective in Assisting Readers to Locate Information in a Color-Coded Article?","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1199","abstract":"In the digital landscape, the ubiquity of data visualizations in media underscores the necessity for accessibility to ensure inclusivity for all users, including those with visual impairments. Current visual content often fails to cater to the needs of screen reader users due to the absence of comprehensive textual descriptions. To address this gap, we propose in this paper a framework designed to empower media content creators to transform charts into descriptive narratives. This tool not only facilitates the understanding of complex visual data through text but also fosters a broader awareness of accessibility in digital content creation. Through the application of this framework, users can interpret and convey the insights of data visualizations more effectively, accommodating a diverse audience. 
Our evaluations reveal that this tool not only enhances the comprehension of data visualizations but also promotes new perspectives on the represented data, thereby broadening the interpretative possibilities for all users.","accessible_pdf":false,"authors":[{"affiliations":["Polytechnique Montr\u00e9al, Montr\u00e9al, Canada"],"email":"qiangxu1204@gmail.com","is_corresponding":true,"name":"Qiang Xu"},{"affiliations":["Polytechnique Montreal, Montreal, Canada"],"email":"thomas.hurtut@polymtl.ca","is_corresponding":false,"name":"Thomas Hurtut"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1199","image_caption":"Main interface with three components: A. List of features in input chart, B. Generated descriptions of selected features, and C. Input chart itself. The list of features includes dropdowns for variable selection, and the generated descriptions are interactively linked to the chart.","keywords":["Accessibility, chart text description."],"open_access_supplemental_link":"","open_access_supplemental_question":"No","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/KgA-HGs0_4s&t=0h35m50s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1199/v-short-1199_Preview.mp4?token=5UBKUTE7Fb4bntXLAbrFS0R6oeGXY8VcYqM22wFtWUI&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1199/v-short-1199_Preview.srt?token=T5rn1XHfcNwElDoSMYngG8YVQBPVUf38dslvgAwHuhM&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"virtual1","session_room":"Palma Ceia I ","session_room_id":"palmaceia1","session_title":"Virtual: VIS from around the world","session_uid":"v-short","session_youtube_ff_id":"9PS0vl2THtI","session_youtube_ff_link":"https://youtu.be/9PS0vl2THtI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KgA-HGs0_4s&t=0h35m50s","sessions":["Virtual: VIS from around the world"],"time_stamp":"2024-10-16T13:03:00Z","title":"From Graphs to Words: A Computer-Assisted Framework for the Production of Accessible Text Descriptions","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1207","abstract":"An essential task of an air traffic controller is to manage the traffic flow by predicting future trajectories. Complex traffic patterns are difficult to predict and manage and impose cognitive load on the air traffic controllers. In this work we present an interactive visual analytics interface which facilitates detection and resolution of complex traffic patterns for air traffic controllers. The interface supports air traffic controllers in detecting complex clusters of aircraft and further enables them to visualize and simultaneously compare how different re-routing strategies for each individual aircraft yield reduction of complexity in the entire sector for the next hour. 
The development of the concepts was supported by the domain-specific feedback we received from six fully licensed and operational air traffic controllers in an iterative design process over a period of 14 months.","accessible_pdf":false,"authors":[{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden","Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"elmira.zohrevandi@liu.se","is_corresponding":true,"name":"Elmira Zohrevandi"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden","Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"katerina.vrotsou@liu.se","is_corresponding":false,"name":"Katerina Vrotsou"},{"affiliations":["Institute of Science and Technology, Norrk\u00f6ping, Sweden","Institute of Science and Technology, Norrk\u00f6ping, Sweden"],"email":"carl.westin@liu.se","is_corresponding":false,"name":"Carl A. L. Westin"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden","Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"jonas.lundberg@liu.se","is_corresponding":false,"name":"Jonas Lundberg"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden","Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"anders.ynnerman@liu.se","is_corresponding":false,"name":"Anders Ynnerman"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1207","image_caption":"The designed focus+context composite glyph aims to facilitate resolution of complex traffic patterns for air traffic controllers. The complexity resolutions are integrated with the conflict resolution glyph. The blue and red plots depict cluster complexity variations with heading and speed changes for a selected aircraft.","keywords":["Visual analytics, Visualization design, Safety-critical systems"],"open_access_supplemental_link":"","open_access_supplemental_question":"Yes, PCS","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/KgA-HGs0_4s&t=0h44m25s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1207/v-short-1207_Preview.mp4?token=cIsnQXbhp_0uoaxDRqO-zLY7uifEQm61gs0IhBotL2Q&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1207/v-short-1207_Preview.srt?token=0sbOYWqtb9L6yYldqdedkFmlEbSGpgfQBGaJ2RdAGSo&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"virtual1","session_room":"Palma Ceia I ","session_room_id":"palmaceia1","session_title":"Virtual: VIS from around the world","session_uid":"v-short","session_youtube_ff_id":"L6BdVBUeOno","session_youtube_ff_link":"https://youtu.be/L6BdVBUeOno","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KgA-HGs0_4s&t=0h44m25s","sessions":["Virtual: VIS from around the world"],"time_stamp":"2024-10-16T13:12:00Z","title":"Design of a Real-Time Visual Analytics Decision Support Interface to Manage Air Traffic Complexity","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"v-short-1277","abstract":"Trust is a subjective yet fundamental component of human-computer interaction, and is a determining factor in shaping the efficacy of data visualizations. 
Prior research has identified five dimensions of trust assessment in visualizations (credibility, clarity, reliability, familiarity, and confidence), and observed that these dimensions tend to vary predictably along with certain features of the visualization being evaluated. This raises a further question: how do the design features driving viewers' trust assessment vary with the characteristics of the viewers themselves? By reanalyzing data from these studies through the lens of individual differences, we build a more detailed map of the relationships between design features, individual characteristics, and trust behaviors. In particular, we model the distinct contributions of endogenous design features (such as visualization type, or the use of color) and exogenous user characteristics (such as visualization literacy), as well as the interactions between them. We then use these findings to make recommendations for individualized and adaptive visualization design.","accessible_pdf":true,"authors":[{"affiliations":["Smith College, Northampton, United States"],"email":"jcrouser@smith.edu","is_corresponding":false,"name":"R. Jordan Crouser"},{"affiliations":["Smith College, Northampton, United States"],"email":"cmatoussi@smith.edu","is_corresponding":true,"name":"Syrine Matoussi"},{"affiliations":["Smith College, Northampton, United States"],"email":"ekung@smith.edu","is_corresponding":false,"name":"Lan Kung"},{"affiliations":["Washington University in St. Louis, St. Louis, United States"],"email":"p.saugat@wustl.edu","is_corresponding":false,"name":"Saugat Pandey"},{"affiliations":["Washington University in St. Louis, St. Louis, United States"],"email":"m.oen@wustl.edu","is_corresponding":false,"name":"Oen G McKinley"},{"affiliations":["Washington University in St. Louis, St. 
Louis, United States"],"email":"alvitta@wustl.edu","is_corresponding":false,"name":"Alvitta Ottley"}],"award":"","doi":"","event_id":"v-full","event_title":"VIS Full Papers","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"v-short-1277","image_caption":"A recursive partitioning approach to identifying exogenous and endogenous predictors of trust behavior.","keywords":["Trust, data visualization, individual differences, personality"],"open_access_supplemental_link":"https://osf.io/k5tzr/","open_access_supplemental_question":"Yes, external","paper_type":"short","paper_type_color":"#FDBB30","paper_type_name":"VIS Short Paper","preprint_link":"https://arxiv.org/abs/2408.03800","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/KgA-HGs0_4s&t=0h52m20s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1277/v-short-1277_Preview.mp4?token=0jxSIKCsT42OZsAZs8zWAJQy6w8sb7p_2uNXfJQr1F0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/v-short/v-short-1277/v-short-1277_Preview.srt?token=YrMUfzToeptnVTQS9X7MQOBjfabmYa3bWVtZOqDRv7c&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"virtual1","session_room":"Palma Ceia I ","session_room_id":"palmaceia1","session_title":"Virtual: VIS from around the world","session_uid":"v-short","session_youtube_ff_id":"cBfXDjQRmaM","session_youtube_ff_link":"https://youtu.be/cBfXDjQRmaM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/KgA-HGs0_4s&t=0h52m20s","sessions":["Virtual: VIS from around the world"],"time_stamp":"2024-10-16T13:21:00Z","title":"Building and Eroding: Exogenous and Endogenous Factors that Influence Subjective Trust in Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1055","abstract":"Data is moving beyond the scientific community, flooding communication channels and addressing issues of importance to all aspects of daily life. This highlights the need for rich and expressive data representations to communicate the science on which society rests and on which society must act. However, current visualization techniques often lack the broad visual vocabulary needed to accommodate the explosion in data scale, diversity and audience perspectives. While previous work has mined artistic and design knowledge for color maps and shape affordances (glyphs) in visualization, line encoding has received little attention. In this paper we report on an exploration of visual properties that extend the vocabulary of the line, particularly for categorical encoding. We describe the creation of a corpus of lines motivated by artistic practice, Gestalt theory, and design principles, and present initial results from a study of how different visual properties influence how people associate these into sets of similar lines. 
While very preliminary, the findings suggest that a rich set of line attributes will support both association and categorical hierarchies, as well as provoke further inquiry into how and why line encoding can be more expressive in encoding multivariate, multidimensional data.","accessible_pdf":false,"authors":[{"affiliations":["University of Texas at Austin, Austin, United States"],"email":"fsamsel@tacc.utexas.edu","is_corresponding":true,"name":"Francesca Samsel"},{"affiliations":["Simon Fraser University, Surrey, Canada"],"email":"lyn@sfu.ca","is_corresponding":false,"name":"Lyn Bartram"},{"affiliations":["University of Texas at Austin, Austin, United States"],"email":"gda@tacc.utexas.edu","is_corresponding":false,"name":"Greg Abram"},{"affiliations":["University of Texas, Texas Advanced Computing Center, Austin, United States"],"email":"adb@tacc.utexas.edu","is_corresponding":false,"name":"Anne Bowen"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1055","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visap1","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Papers","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Papers"],"time_stamp":"2024-10-16T14:15:00Z","title":"What\u2019s My Line? Exploring the Expressive Capacity of Lines in Scientific Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1102","abstract":"In the realm of human-computer interaction, AI interactive systems aim to foster connections and understanding among users further, deepening the communication between humans and machines as well as among multiple individuals. However, this paper highlights that current studies have neglected the media and philosophical dimensions, culminating in an interactive system named the 'Humanity Test.' \"Humanity\" refers to emotions and consciousness, while \"test\" signifies a critical study of AI technology and an exploration of the distinctions between humanity and technicality. Furthermore, based on a review of related literature, we argue that the focus of AI system research is shifting, with electroencephalogram (EEG) data becoming a trend in AI system integration. Collecting and analyzing experimental data, we identified three design directions: enhancing immersive experiences, creating emotional experiences, and expressing ideas. The experiment results indicate that integrating EEG data into AI systems markedly improves participants' immersive and emotional experiences. This integration not only promotes a deeper understanding of the human-machine boundary but also encourages empathic interactions among users. 
Based on these findings, EEG data as a medium shows a promising potential to enrich interactive experiences, providing new insights into integrating technology with human emotions.","accessible_pdf":false,"authors":[{"affiliations":["College of Design and Innovation, Tongji University, Shanghai, China"],"email":"373895198@qq.com","is_corresponding":true,"name":"Fang Fang"},{"affiliations":["College of Design and Innovation, Shanghai, China"],"email":"tanhaogao@gmail.com","is_corresponding":false,"name":"Tanhao Gao"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1102","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visap1","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Papers","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Papers"],"time_stamp":"2024-10-16T14:25:00Z","title":"Humanity Test - EEG Data Mediated Artificial Intelligence Multiplayer Interactive System","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1044","abstract":"As advanced technology reshapes our perception, the dialogue between humans and the universe undergoes a transformative shift. Understanding this transformation can help us think about how humanity is headed in the future. To illustrate this dialogue shift, we propose the creation of a spatial art installation that embodies the revolution in dialogue. Drawing on interdisciplinary research and methodologies spanning anthropology, philosophy, astronomy, acoustics, computer science, and nomadic traditional singing, we embark on a transformative journey. Using artistic language, this work juxtaposes the most advanced astronomical observation practices of humanity with the ancient nomadic tradition of conversing with the cosmos. Specifically, it engages in a dialogue between the astronomical data from the James Webb Space Telescope and the throat-singing tradition of Khoomei. Subsequently, the work models the propagation of these sounds in three-dimensional space and materializes them into tangible entities. 
By immersing observers in the spatial representation of this dialogue, we offer a profound experience of evolving dialogue between human and the universe within the fluidity of spacetime.","accessible_pdf":false,"authors":[{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"],"email":"ywang342@connect.hkust-gz.edu.cn","is_corresponding":true,"name":"Fiona You Wang"},{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"],"email":"anijiati587@connect.hkust-gz.edu.cn","is_corresponding":false,"name":"Joshua Nijiati Alimujiang"},{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"],"email":"wohinwu@gmail.com","is_corresponding":false,"name":"Violet Wei Wu"},{"affiliations":["Washington University in St.Louis, St.Louis, United States"],"email":"liu.rose@wustl.edu","is_corresponding":false,"name":"Rose Yiwei Liu"},{"affiliations":["The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"],"email":"kzhangcma@hkust-gz.edu.cn","is_corresponding":false,"name":"Kang Zhang"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1044","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visap1","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Papers","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Papers"],"time_stamp":"2024-10-16T14:50:00Z","title":"Spacetime Dialogue: Integrating Astronomical Data and Khoomei in Spatial Installation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1052","abstract":"Data visualization is often associated with efficiency and the production of insights. However, visual artworks that utilize data as their artistic medium, often referred to as data art or artistic visualizations, receive less attention, especially in discussions surrounding exhibitions specifically focused on data visualization. Artistic visualization is typically presented and debated at conferences on data visualization and related areas in computing and design, usually involving an exhibition of works in parallel. While there are established exhibitions in electronic art, collective exhibitions focused on artistic data visualization, especially those independent of academic events, remain rare. Additionally, there is a limited amount of literature regarding the curatorial practice of specifically artistic data visualization exhibitions. This paper aims to contribute with the discussion of the curatorial processes behind two artistic data visualization exhibitions, Numerical Existence and Numerical Existence: Emergencies, held in Rio de Janeiro in 2018 and 2024, respectively. 
We will present a brief overview of curatorial at- tributes, identify the most common issues addressed in exhibitions dedicated to data visualization curated in artistic contexts, discuss the role and unique challenges of curatorial practice in this field, and share insights from our curatorial experience with two exhibitions. Furthermore, we will propose future directions for research and practice in the curation of artistic data visualization. Through this exploration, we aim to contribute to the curatorial practice of artistic data visualization, providing reflections and recommendations to enhance the development of this emerging field.","accessible_pdf":false,"authors":[{"affiliations":["Federal University of Rio de Janeiro, Rio de Janeiro, Brazil","Pontifical Catholic University of Rio de Janeiro, Rio de Janeiro, Brazil"],"email":"luiztorresludwig@gmail.com","is_corresponding":false,"name":"Luiz Ludwig"},{"affiliations":["Rio de Janeiro State University, Rio de Janeiro, Brazil"],"email":"bcastro@esdi.uerj.br","is_corresponding":false,"name":"Barbara Castro"},{"affiliations":["Universidade Federal do Rio de Janeiro, Rio de Janeiro, Brazil"],"email":"doriskos@eba.ufrj.br","is_corresponding":true,"name":"Doris Kosminsky"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1052","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visap1","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Papers","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Papers"],"time_stamp":"2024-10-16T15:00:00Z","title":"Numerical Existence: Reflections on Curating Artistic Data Visualization Exhibitions","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1082","abstract":"This article introduces an artistic research project that utilises artist-in-residency and exhibition as methods for exploring the possibilities of robotic 3D printing and ceramics. The interdisciplinary project unites artists and architects to collaborate on a proposed curatorial concept and Do-It-With-Others (DIWO) technological development. Constraints include material, specifically local clay, production technique, namely 3D printing with a robotic arm, and kiln size, as well as an exhibition concept that is further elaborated in the next chapter. The pictorial presents four projects as case studies demonstrating how the creatives integrate these constraints into their processes. This integration leads to the subsequent refinement and customization of the robotic-ceramics interface, aligning with the practitioners' requirements through software development. 
The project's focus extends beyond artistic outcomes, aiming also to advance the pipeline of 3D robotic printing in clay, employing a digitally controlled material press that has been developed in-house, with its functionality refined through practice.","accessible_pdf":false,"authors":[{"affiliations":["Academy of Media Arts Cologne, Cologne, Germany"],"email":"varvarag@gmail.com","is_corresponding":true,"name":"Varvara Guljajeva"},{"affiliations":["Tallinn University, Tallinn, Estonia","Academy of Media Arts Cologne, Cologne, Germany"],"email":"mar.canet@gmail.com","is_corresponding":false,"name":"Mar Canet Sola"},{"affiliations":["Estonian Academy of Arts, Tallinn, Estonia"],"email":"lauri.kilusk@artun.ee","is_corresponding":false,"name":"Lauri Kilusk"},{"affiliations":["Estonian Academy of Arts, Tallinn, Estonia"],"email":"martin.melioranski@artun.ee","is_corresponding":false,"name":"Martin Melioranski"},{"affiliations":["Estonian Academy of Arts, Tallinn, Estonia"],"email":"kaiko.kivi@artun.ee","is_corresponding":false,"name":"Kaiko Kivi"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1082","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visap2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Pictorials","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Pictorials"],"time_stamp":"2024-10-17T14:15:00Z","title":"Loading Ceramics: Visualising Possibilities of Robotics in Ceramics","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1090","abstract":"With armed conflicts and wars continuing to occur globally, the pursuit of peace is an enduring concern. In the efforts to resolve these conflicts, a vast number of peace agreements have been signed. In this project, we examine the extent to which women and gender are explicitly acknowledged or addressed in peace agreements. 
Using debossing, we physicalize the mentions of women and gender in these agreements as a means to increase awareness and recognition of these often-overlooked constituencies.","accessible_pdf":false,"authors":[{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"jennylzx@outlook.com","is_corresponding":true,"name":"Jenny Long"},{"affiliations":["The University of Edinburgh, Edinburgh, United Kingdom"],"email":"jinrui.w@outlook.com","is_corresponding":false,"name":"Jinrui Wang"},{"affiliations":["School of Law (PeaceRep), Edinburgh, United Kingdom"],"email":"tvancisi@ed.ac.uk","is_corresponding":false,"name":"Tomas Vancisin"},{"affiliations":["School of Law (PeaceRep), Edinburgh, United Kingdom"],"email":"laura.wise@ed.ac.uk","is_corresponding":false,"name":"Laura Wise"},{"affiliations":["Newcastle University, Newcastle Upon Tyne, United Kingdom"],"email":"xinhuan.shu@gmail.com","is_corresponding":false,"name":"Xinhuan Shu"},{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"tcapel@ed.ac.uk","is_corresponding":false,"name":"Tara Capel"},{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"uhinrich@ed.ac.uk","is_corresponding":false,"name":"Uta Hinrichs"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1090","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visap2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Pictorials","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Pictorials"],"time_stamp":"2024-10-17T14:25:00Z","title":"Pieces of Peace: Women and Gender in Peace Agreements","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1099","abstract":"This pictorial illustrates an autoethnographic explora-tion of the first author\u2019s design practice for the data physicalization \u201cShredded Lives: A Decade of Migrant Loss.\u201d It emphasizes the parallel development of seven design components -- Interaction Mode, Technology, Data Representation, Physical Configuration & Scale, Dataset, Engagement Mode, and Spatial Experience. This flexible, non-hierarchical approach allows each of the seven design components to inform and evolve alongside the others, stemming from a desire to thor-oughly explore the design space without confinement by initial restrictions. 
As these design components overlap and intersect, dynamic interactions occur, leading to the manifestation of design ideas.","accessible_pdf":false,"authors":[{"affiliations":["Simon fraser university, Burnaby, Canada"],"email":"foroozan_daneshzand@sfu.ca","is_corresponding":true,"name":"Foroozan Daneshzand"},{"affiliations":["University of Victoria, Victoria, Canada"],"email":"cperin@uvic.ca","is_corresponding":false,"name":"Charles Perin"},{"affiliations":["Simon Fraser University, Burnaby, Canada"],"email":"sheelagh@sfu.ca","is_corresponding":false,"name":"Sheelagh Carpendale"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1099","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visap2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Pictorials","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Pictorials"],"time_stamp":"2024-10-17T14:35:00Z","title":"Design Process of 'Shredded Lives': An Illustrated Exploration","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1077","abstract":"Metro systems are the pulsing veins of cities, traversing the city\u2019s texture and preserving the memory of urban life. Visualizing the metro, which is a visceral and accustomed part of the daily lived experience for residents, makes it reappear in residents' perspectives in a new form, becoming a more emblematic landscape of each city's unique identity and development. In this project, we introduce an abstraction method that encodes metro routes as lines, cities as squares, and the global map as an abstract representation. Along with the implementation of an interactive system, the project enables a comprehensive visual exploration of the global metro lines. Through this highly abstract and minimalist form, each city\u2019s structure, symbolic identity, and regional development are revealed. Moreover, the colorful global metro map efficiently portrays the diversity and evolution of metro lines worldwide. 
With this pictorial we narrate the design process and our reflections along the project.","accessible_pdf":false,"authors":[{"affiliations":["Peking University, Beijing, China","Peking University, Beijing, China"],"email":"cxyapril@stu.pku.edu.cn","is_corresponding":true,"name":"Xinyue Chen"},{"affiliations":["Central Academy of Fine Arts, Beijing, China","Central Academy of Fine Arts, Beijing, China"],"email":"846218997@qq.com","is_corresponding":false,"name":"Yixuan Zhang"},{"affiliations":["Shanghai Jiao Tong University, Shanghai, China","Shanghai Jiao Tong University, Shanghai, China"],"email":"flora20@sjtu.edu.cn","is_corresponding":false,"name":"Yutong Yang"},{"affiliations":["NUA School of Design, Nanjing, China","NUA School of Design, Nanjing, China"],"email":"503578112@qq.com","is_corresponding":false,"name":"Jing Chen"},{"affiliations":["Syracuse University, Syrcause, United States","Syracuse University, Syrcause, United States"],"email":"rxu@syr.edu","is_corresponding":false,"name":"Rebecca Ruige Xu"},{"affiliations":["Central Academy of Fine Arts, Beijing, China","Central Academy of Fine Arts, Beijing, China"],"email":"marco@cafa.edu.cn","is_corresponding":false,"name":"Wai Ping Chan"},{"affiliations":["Peking University, Beijing, China","Peking University, Beijing, China"],"email":"xiaoru.yuan@pku.edu.cn","is_corresponding":false,"name":"Xiaoru Yuan"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1077","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visap2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Pictorials","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Pictorials"],"time_stamp":"2024-10-17T14:50:00Z","title":"City Pulse: Revealing City Identity Through Abstraction of Metro Lines","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1035","abstract":"\u201cNorthness\u201d is an installation that maps the latitudes of the servers that host the most popular websites in Brazil. Composed of three-dimensional typographic sculptures, a touch screen and projection, the work allows the public to visualize and locate the servers of the one hundred most accessed websites in Brazil. This installation is part of research in artistic data visualization that addresses issues of the data infrastructure sustaining our society, highlighting the Global North\u2019s dominance in data flows. 
\u201cNorthness\u201d was featured in the exhibition \u201cNumerical Existence: Emergencies,\u201d which took place in 2024 at the Futuros Cultural Center in Brazil.","accessible_pdf":false,"authors":[{"affiliations":["Pontifical Catholic University of Rio de Janeiro, Rio de Janeiro, Brazil","Federal University of Rio de Janeiro, Rio de Janeiro, Brazil"],"email":"luiztorresludwig@gmail.com","is_corresponding":true,"name":"Luiz Ludwig"},{"affiliations":["Universidade Federal do Rio de Janeiro, Rio de Janeiro, Brazil"],"email":"doriskos@eba.ufrj.br","is_corresponding":false,"name":"Doris Kosminsky"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1035","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visap2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Pictorials","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Pictorials"],"time_stamp":"2024-10-17T15:00:00Z","title":"Northness: Poetic Visualization of Data Infrastructure Inequality","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1047","abstract":"In the face of pressing global issues like climate change, data visualization is a powerful tool for making sense of complexity. With the project \u201cA Perfect Storm\u201d, we aim to engage audiences in the oft-difficult conversation around global climate change in a way that considers the emotional responses that the topic can trigger. Through a metaphorical approach of visually juxtaposing countries' climate risk with their climate responsibility, we encourage critical reflection on the human experience and inequities of climate change related loss.","accessible_pdf":false,"authors":[{"affiliations":["Northeastern University, Boston, United States"],"email":"hudsonprock.c@northeastern.edu","is_corresponding":true,"name":"Chloe Hudson Prock"},{"affiliations":["Northeastern University, Boston, United States"],"email":"p.cruz@northeastern.edu","is_corresponding":false,"name":"Pedro M. 
Cruz"},{"affiliations":["Northeastern University, Boston, United States"],"email":"gold.g@northeastern.edu","is_corresponding":false,"name":"Gregory Gold"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1047","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visap2","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Pictorials","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Pictorials"],"time_stamp":"2024-10-17T15:10:00Z","title":"A Perfect Storm","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1004","abstract":"EchoVision is an immersive art installation that allows participants to experience the world of bats using sound visualization and mixed reality technology. With a custom-designed, bat-shaped mixed reality mask based on the open-source HoloKit mixed reality project, users can simulate echolocation, the natural navigation system bats use in the dark. They do this by using their voices and interpreting the returned echoes with the mixed-reality visualization. The exhibit adjusts visual feedback based on the pitch and tone of the user's voice, offering a dynamic and interactive depiction of how bats perceive their environment. This installation combines scientific learning with empathetic engagement, encouraging an ecocentric design perspective and understanding between species. 
\"EchoVision\" educates and inspires a deeper appreciation for the unique ways non-human creatures interact with their ecosystems.","accessible_pdf":false,"authors":[{"affiliations":["Reality Design Lab, New York City, United States"],"email":"botao@reality.design","is_corresponding":true,"name":"Botao Amber Hu"},{"affiliations":["University of Texas at Austin, Austin, United States"],"email":"stephlijiabao@gmail.com","is_corresponding":false,"name":"Jiabao Li"},{"affiliations":["China Academy of Art, HangZhou, China"],"email":"danlinhuang0428@gmail.com","is_corresponding":false,"name":"Danlin Huang"},{"affiliations":["China Academy of Art, HangZhou, China"],"email":"liujianan705@outlook.com","is_corresponding":false,"name":"Jianan Johanna Liu"},{"affiliations":["Independent, Shanghai, China"],"email":"agalloch21@gmail.com","is_corresponding":false,"name":"Xiaobo Aaron Hu"},{"affiliations":["Reality Design Lab, New York City, United States"],"email":"elan@reality.design","is_corresponding":false,"name":"Yilan Elan Tao"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1004","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T19:15:00Z","title":"EchoVision","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1014","abstract":"Flags of Inequality is a data exhibit based on the digital project of the same name. This artwork is a collection of forty-nine incomplete pride flags that invite the audience to reflect on the inequalities still faced by the LGBTQ+ population of European countries. This data visualization takes the pride flag, an iconic symbol of the community, and reworks it with data on the laws and policies in these countries to tell the story of inequality through a visual metaphor. In the visualization, the partial pride flags are presented in frames, juxtaposing color with a dark area that signifies the missing portion of the flag. Flags vary dramatically between countries. The flags for Malta or Iceland are almost complete, while the ones of Russia or Azerbaijan are barely visible. The incomplete flags portray the limitations to the lives of the queer community through the absence of color and space. On the other end, a colorful, almost complete flag is a reflection of a place where a whole, joyful queer life is more likely. 
This collection prompts the audience to face the emotional response caused by the meaning of the familiar yet altered symbol, promoting awareness of diverse queer realities and the need for social justice.","accessible_pdf":false,"authors":[{"affiliations":["Independent, Lisbon, Portugal"],"email":"costa.rita93@gmail.com","is_corresponding":true,"name":"Rita Costa"},{"affiliations":["Independent, Lisbon, Portugal"],"email":"mbeatrizmalveiro@gmail.com","is_corresponding":false,"name":"Beatriz Malveiro"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1014","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T19:15:00Z","title":"Flags of Inequality","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1028","abstract":"Collaborative art and co-creation enhance social well-being and connectivity. However, the combination of art creation through mutual brainwave interaction with the prosocial potential of EEG biosignals reveals an untapped opportunity. SynCocreate presents the design and prototype of a VR-based interpersonal electroencephalography (EEG) neurofeedback co-creation platform. This generative VR platform enables paired individuals to interact via brainwaves in a 3D virtual canvas, painted and animated collaboratively through their real-time brainwave data. The platform employs synchronized visual cues, aligned with the real-time brainwaves of paired users, to investigate the potential of collaborative neurofeedback in enhancing co-creativity and emotional connection. 
It also explores the use of Virtual Reality (VR) in fostering creativity and togetherness through immersive, collective visualizations of brainwaves.","accessible_pdf":false,"authors":[{"affiliations":["Independent Researcher, San Mateo, United States"],"email":"fionafeng97@outlook.com","is_corresponding":true,"name":"Xin Feng"},{"affiliations":["VLab, Cambridge, United States","Independent Designer, Cambridge, United States"],"email":"isabel.tg.wang@gmail.com","is_corresponding":false,"name":"Tiange Wang"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1028","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T19:15:00Z","title":"SynCocreate: Fostering Interpersonal Connectedness via Brainwave-Driven Co-creation in Virtual Reality","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1039","abstract":"Transferscope is an interactive installation that lets users explore and reflect the implications of generative artificial intelligence on our perception of the physical world. The handheld device allows users to sample materials, concepts and aesthetics and seamlessly project and apply them onto any object or scene, thereby creating imaginative and unique visual experiences. Transferscope is an open-source powered generative AI exploration device that showcases the expansive potential of AI technologies in artistic creation and design innovation. 
It empowers users to explore multifaceted aesthetics, pushing the boundaries of visual expression and conceptual ideation.","accessible_pdf":false,"authors":[{"affiliations":["University of Design Schw\u00e4bisch Gm\u00fcnd, Schw\u00e4bisch Gm\u00fcnd, Germany"],"email":"cpietsch@gmail.com","is_corresponding":true,"name":"Christopher Pietsch"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1039","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T19:15:00Z","title":"Transferscope - Synthesized Reality","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1041","abstract":"Displacement Flowers Visualizaing global human displacment due to natural disasters One of the pressing consequences of carbon-fueled climate change is its direct link to causing various forms of natural disasters. These disasters range from wildfires, and floods, to tsunamis and earthquakes. In the fallout of these disasters many people become displaced from their homes. By the year 2050 it is estimated that 140 million people will be displaced from their home countries of sub-Saharan Africa, South Asia, and Latin America due to these disasters (World Bank). As a result, it is of increasing importance to address the impacts of climate change and not only the effects on the environment, but also on the world\u2019s inhabitants. 
This visualization was created in order to showcase the impact of natural disasters and the need for climate reform globally in an aesthetically beautiful, and interpretable, way.","accessible_pdf":false,"authors":[{"affiliations":["Northeastern University, Boston, United States"],"email":"elizabeth.mccaffrey4@gmail.com","is_corresponding":true,"name":"Elizabeth Iris McCaffrey"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1041","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T19:15:00Z","title":"Displacement Flowers","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1068","abstract":"'Rage Against the Archive' is an experimental browser-based video that critically probes how the New York Public Library's website catalogs, displays and even sells dehumanizing ethnographic photos from the 19th-century colonial-era publication The People of India. This work interrogates how images get decontexualized due to the archival process, and documents the \u201chacking\u201d methodology used to insert different texts on the website using HTML in a symbolic act of Electronic Civil Disobedience. The People of India, published between 1868-75, is one of the world's most comprehensive ethnographic books, commissioned by the British colonial government in India after the 1857 First War of Independence. After having experienced violent uprisings and the first challenge to their colonial rule, the British were keen to understand the native tribes and their cultures to rule them better and prevent future rebellions. The camera, masquerading as an objective device, was employed as an imperial tool by the colonial government to document natives, \u201cothering\u201d them in this process. How do these problematic historical images exist in our contemporary Networked Image Culture? 
This video scrutinizes whether institutional archives inadvertently perpetuate colonial exploitation and the camera's violence, raising ethical questions about how we as a more conscientious society should consume certain images online.","accessible_pdf":false,"authors":[{"affiliations":["Syracuse University, Syracuse, United States"],"email":"aroy07@syr.edu","is_corresponding":true,"name":"Anshul Roy"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1068","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T19:15:00Z","title":"Rage Against the Archive","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1089","abstract":"Self-tracking data, often embodied in photos, is a pervasive yet underrecognized form of data that captures our experiences and emotions. \"Mosaic Memory Drive\" explores the materiality of digital images, questioning whether the essence of analog photography, described by Roland Barthes as its \"punctum\", persists in the digital age. By reconstructing images through an endless loop of pixel permutations, this work blurs the line between the original and its reinterpretations, challenging the notion of a post-photographic world. The piece functions as both a puzzle of self-tracked memories and a process of encryption and decryption, emphasizing the plasticity and ephemeral nature of digital media. 
Through this, it invites reflection on our evolving relationship with memory, presence, and the passage of time in the context of digital data.","accessible_pdf":false,"authors":[{"affiliations":["Institute of Visual Computing and Human-Centered Technology, Vienna, Austria"],"email":"ignbpm@gmail.com","is_corresponding":true,"name":"Ignacio P\u00e9rez-Messina"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1089","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T19:15:00Z","title":"Mosaic Memory Drive","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1054","abstract":"Curbside is a personal exploration of (dis)ability and (im)mobility in wintertime Calgary. I use textiles, texts, and photographs to weave together self and the environment. Curbside connects quantitative data about snow and temperature with traces of environmental conditions using dyed wool yarns and photographs. Interlaced throughout are theoretically grounded autobiographical reflections about disability. These reflections focus on how landscape forms and interacts with disability in ways that are informed by water, snow, and ice. It embodies how different forms of data such as quantitative weather data, material traces, and personal stories can work together. 
Curbside is an example of data art that incorporates personal experience to illuminate local systems in thoughtful ways.","accessible_pdf":false,"authors":[{"affiliations":["University of Calgary, Calgary, Canada"],"email":"karly.ross@ucalgary.ca","is_corresponding":true,"name":"Karly Ross"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1054","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T20:15:00Z","title":"Curbside","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1058","abstract":"Although artists and scientists often work together for a visual rendering of scientific concepts, rarely do the two come together in a such a close-knit, equal collaboration, in which the germination of the idea and the weaving together of art and science result in an oeuvre in which the scientist explains the science to the artist and the artist gives the artistic view of the science itself, allowing the public to enter the art to see the science. The data remains the same, with two different media providing different interpretive perspectives. \u00a0 In this project, five specific events in the history of the Greenland ice sheet are \u201cinterviewed\u201d, showing how the art and science are interlinked. \u201cInterviews\u201d is a multimodal art installation that seeks to provide viewers with an embodied understanding of glacial change. Through a range of scientific and artistic methodologies we identify distinct phases of knowledge-building about Greenland\u2019s ice as opportunities where texture, form, and diverse data can provide openings for encountering an otherwise overwhelming or threatening reality. Through \u201cInterviews,\u201d viewers are invited to see in Greenland\u2019s past possibilities for a different future. \u201cInterviews\u201d depicts technical advances that have enabled progress in our understanding of Greenland\u2019s Ice Sheet evolution over the millennia. 
The five columns, illustrate updates in methods of studying the ice, and are a testament to the ways that diverse data provide complementary insights to the same question, while at the same time illuminating new questions.","accessible_pdf":false,"authors":[{"affiliations":["University of Texas at Austin, Austin, United States"],"email":"fsamsel@tacc.utexas.edu","is_corresponding":true,"name":"Francesca Samsel"},{"affiliations":["University of Texas at Austin, Austin, United States"],"email":"benjamin.keisling@austin.utexas.edu","is_corresponding":false,"name":"Benjamin Keisling"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1058","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T20:15:00Z","title":"Interviews with the Ice","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1078","abstract":"In the video series \u2018Biological Rhythms\u2019, electrical signals generated by plants are sonified and captured to drive real-time data visualisations. From this live data, we will create a series of eight video pieces ( see links to draft versions of the first four in \u2018Recent work, video links\u2019 section below). Living plants and the human body may appear to be very different entities, but they have many underlying confluences. Once such confluence is that both generate bio-electrical signals that pass through bodily systems. In \u2018Biological Rhythms\u2019 we will use these signals to generate real time visualisations, revealing the unseen bioelectrical rhythms of plants. Through the biological sciences, we understand plant meta- processes such as osmosis and photosynthesis, yet because their cellular structure is so delicate, plants are notoriously hard to study in fine detail. Sonifying plant signals affords a method to explore their bio-rhythms in an accessible form for a non-scientific audience. As part of our bespoke and innovative method, the electrical signals from plants are converted to audio and passed through the program Touch Designer, where the plant signals activate complex geometrical forms. 
Simon Howden composes 'human' music which is mixed live with the plant signals, allowing us to explore co-creation with living plants as a posthuman mode of artistic research.","accessible_pdf":false,"authors":[{"affiliations":["Queensland University of Technology, Brisbane, Australia","UnCalculated Studio, Brisbane, Australia"],"email":"rewa.wright@qut.edu.au","is_corresponding":true,"name":"Rewa Wright"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1078","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T20:15:00Z","title":"BioRhythms: Artistic research with plants, real-time animation and sound","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1079","abstract":"This artwork was born of witnessing my grandmother's memory regression due to dementia, where her cherished stories dissolved into fragmented words. Dr. Mary Steedly once described memories as a \"densely layered, sometimes conflictual negotiation with the passage of time\", and in 2022, over 50 million people faced this painful reality of memory loss due to Alzheimer's and related dementias. Yet, amidst this poignant backdrop, the emergence of text-to-image AI systems in 2022 offered a glimmer of new perspective, as they harnessed the power of language to imagine and reassemble fragmented memories, possibly to weave what time and disease had stolen. \u200b When we coexist with machines, will we accumulate synthetic recollections of collective symbiotic imagination? Is language capable of re-weaving and synthesizing memories? How does our collective memory inspire new visual forms and alternative narratives? Recollection is an assemblage of intimate human-machine artifacts that emphasizes the contributions from three sides: artists, machines, and participants. This customized AI application facilitates multiple AI techniques, like speech recognition, text auto-completion, and text-to-image, to convert language input into image sequences of new memories. As an interactive experience, participants will whisper their personal memories with fragmented sentences, and our system will automatically fill in details, creating new touching visual memories. We developed our customized AI system by fine-tuning a pre-trained transformer-based AI model to learn the documentaries of Alzheimer patients\u2019 visual memories and their descriptions. The system imagines new memories of \"love\" and \"loss\" by interpreting real-time narratives from participants in the installation. Our system emerges as a vibrant and inclusive conversation starter, transcending boundaries with support for over 89 different languages, embracing the diverse cultural artifacts. 
In the art installation, we chose not to showcase the direct visual output generated by our AI system. Instead, we drew inspiration from fine-art practices such as the Monotype, a printmaking technique tracing its origins to the 1640s, and slitscan photography, known for capturing sequential slices of a subject over time. We aimed to present ReCollection by combining generative methodologies with fine-art practices, investigating new aesthetics that explore the fleeting visual imagery, undergoing dissolution, tilting, printing, and reprinting over time. By providing a conceptual framework for non-linear narratives, which constitute symbiotic imaginations, and future scenarios of memories, culture production, and reproductions. It may inspire the cure for memory regression by providing a future scenario, a thought experiment, and an intimate recollection of symbiosis between beings and apparatus. It raises people's awareness of future memory preservation and their empathy for the dementia community through a personalized aesthetic experience. It offers an artistic approach and future prototype for cultural heritage reproduction and re-imagination and explores the tensions that exist in the co-relations between visual representations, language, and narratives.","accessible_pdf":false,"authors":[{"affiliations":["Arizona State University, Tempe, United States"],"email":"weidizhang@ucsb.edu","is_corresponding":true,"name":"weidi zhang"},{"affiliations":["Independant Researcher, Beijing, China"],"email":"jieliang@ucsb.edu","is_corresponding":false,"name":"Jieliang Luo"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1079","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T20:15:00Z","title":"ReCollection","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1094","abstract":"Our work builds on the study of notational systems in the context of rap music and offers rich insights into the complexities of language, culture, and expression in a postcolonial culture. We developed our algorithm by analyzing the classic hip-hop song \u201c93 till Infinity\u201d by Souls of Mischief. Isolating each individual instrument is typical for MIDI files, but data is not available in this format for songs recorded before the new millennium, which were laid on 2\u201d cellulose tapes. Thus, we recreated the song through sampling from the original mp4 format, which only supplies one track of data. As we only needed enough data to map to a visually legible design, the quality of this data was not \u2018audio quality\u2019, however, we would not have been able to computationally visualize a song of this vintage without it. 
With Rap Tapestry, we provide a new mode of expression for understanding the structure and flow of a rap song, mapping each instrument track individually, in combination with colored dots reflecting the rhyming patterns within the rap lyrics. The piece can be experienced in tandem with the audio or in the digital system for a finer grained level of analysis.","accessible_pdf":false,"authors":[{"affiliations":["Northeastern University, Boston, United States"],"email":"c.hull@northeastern.edu","is_corresponding":true,"name":"Carmen Hull"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1094","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T20:15:00Z","title":"Rap Tapestry: A Music Visualization Tool with Physical Weaving Data Physicalization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1097","abstract":"Inspired by Wagashi, the traditional Japanese confection art regarded as a microcosm of time, space and nature, DataWagashi is a new medium aiming to make data tangible, accessible and fun by blending taste, smell, touch, texture, and physical interaction into the vocabulary of data communication. 
By embracing a sensory upgrade from data visualization to data physicalization, Data Wagashi turns data into an experience that is sharable among people and accessible to those with different sensory capabilities, making complex environmental data approachable, fostering empathy, and empowering people to make better choices.","accessible_pdf":false,"authors":[{"affiliations":["VLab, Cambridge, United States","Independent Designer, Cambridge, United States"],"email":"isabel.tg.wang@gmail.com","is_corresponding":true,"name":"Tiange Wang"},{"affiliations":["VLab, Cambridge, United States","Independent Designer, Cambridge, United States"],"email":"ihuang@gsd.harvard.edu","is_corresponding":false,"name":"I-Yang Huang"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1097","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T20:15:00Z","title":"DataWagashi: Feeling Climate Data via New Design Medium","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"a-visap-1103","abstract":"With armed conflicts and wars continuing to occur globally, the pursuit of peace is an enduring concern. In the efforts to resolve these conflicts, a vast number of peace agreements have been signed. In this project, we examine the extent to which women and gender are explicitly acknowledged or addressed in peace agreements. 
Using debossing, we physicalize the mentions of women and gender in these agreements as a means to increase awareness and recognition of these often-overlooked constituencies.","accessible_pdf":false,"authors":[{"affiliations":["The University of Edinburgh, Edinburgh, United Kingdom"],"email":"jinrui.w@outlook.com","is_corresponding":false,"name":"Jinrui Wang"},{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"jennylzx@outlook.com","is_corresponding":true,"name":"Jenny Long"},{"affiliations":["School of Law (PeaceRep), Edinburgh, United Kingdom"],"email":"tvancisi@ed.ac.uk","is_corresponding":false,"name":"Tomas Vancisin"},{"affiliations":["School of Law (PeaceRep), Edinburgh, United Kingdom"],"email":"laura.wise@ed.ac.uk","is_corresponding":false,"name":"Laura Wise"},{"affiliations":["Newcastle University, Newcastle Upon Tyne, United Kingdom"],"email":"xinhuan.shu@gmail.com","is_corresponding":false,"name":"Xinhuan Shu"},{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"tcapel@ed.ac.uk","is_corresponding":false,"name":"Tara Capel"},{"affiliations":["University of Edinburgh, Edinburgh, United Kingdom"],"email":"uhinrich@ed.ac.uk","is_corresponding":false,"name":"Uta Hinrichs"}],"award":"","doi":"","event_id":"a-visap","event_title":"VIS Arts Program","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"a-visap-1103","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"associated","paper_type_color":"#2672B9","paper_type_name":"Associated Event","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"visapr","session_room":"Bayshore III","session_room_id":"bayshore3","session_title":"VISAP Artist Talks","session_uid":"a-visap","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISAP Artist Talks"],"time_stamp":"2024-10-15T20:15:00Z","title":"Pieces of Peace: Women and Gender in Peace Agreements","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1004","abstract":"Large Language Models (LLMs) have been widely applied in summarization due to their speedy and high-quality text generation. Summarization for sensemaking involves information compression and insight extraction. Human guidance in sensemaking tasks can prioritize and cluster relevant information for LLMs. However, users must translate their cognitive thinking into natural language to communicate with LLMs. Can we use more readable and operable visual representations to guide the summarization process for sensemaking? Therefore, we propose introducing an intermediate step--a schematic visual workspace for human sensemaking--before the LLM generation to steer and refine the summarization process. We conduct a series of proof-of-concept experiments to investigate the potential for enhancing the summarization by GPT-4 through visual workspaces. Leveraging a textual sensemaking dataset with a ground truth summary, we evaluate the impact of a human-generated visual workspace on LLM-generated summarization of the dataset and assess the effectiveness of space-steered summarization. 
We categorize several types of extractable information from typical human workspaces that can be injected into engineered prompts to steer the LLM summarization. The results demonstrate how such workspaces can help align an LLM with the ground truth, leading to more accurate summarization results than without the workspaces.","accessible_pdf":false,"authors":[{"affiliations":["Computer Science Department, Blacksburg, United States"],"email":"tangxxwhu@gmail.com","is_corresponding":true,"name":"Xuxin Tang"},{"affiliations":["Dod, Laurel, United States"],"email":"ericpkrokos@gmail.com","is_corresponding":false,"name":"Eric Krokos"},{"affiliations":["Department of Defense, College Park, United States"],"email":"visual.tycho@gmail.com","is_corresponding":false,"name":"Kirsten Whitley"},{"affiliations":["City University of Hong Kong, Hong Kong, China"],"email":"canliu@cityu.edu.hk","is_corresponding":false,"name":"Can Liu"},{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"naren@cs.vt.edu","is_corresponding":false,"name":"Naren Ramakrishnan"},{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"north@vt.edu","is_corresponding":false,"name":"Chris North"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1004","image_caption":"We created an intermediate workspace based on the ground truth of an intelligence analysis dataset to better understand the enhancements in LLM summarization achieved by integrating the workspace. We then conducted proof-of-concept experiments to assess how the workspace and each type of information impact LLM summarization. The experiment pipeline and simulated workspace are shown in the image.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Steering LLM Summarization with Visual Workspaces for Sensemaking","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1007","abstract":"We explore the use of segmentation and summarization methods for the generation of real-time conversation topic timelines, in the context of glanceable Augmented Reality (AR) visualization. Conversation timelines may serve to summarize and contextualize conversations as they are happening, helping to keep conversations on track. Because dialogue and conversations are broad and unpredictable by nature, and our processing is being done in real-time, not all relevant information may be present in the text at the time it is processed. 
Thus, we present considerations and challenges which may not be as prevalent in traditional implementations of topic classification and dialogue segmentation. Furthermore, we discuss how AR visualization requirements and design practices require an additional layer of decision making, which must be factored directly into the text processing algorithms. We explore three segmentation strategies -- using dialogue segmentation based on the text of the entire conversation, segmenting on 1-minute intervals, and segmenting on 10-second intervals -- and discuss our results.","accessible_pdf":false,"authors":[{"affiliations":["University of Calgary, Calgary, Canada"],"email":"shanna.hollingwor1@ucalgary.ca","is_corresponding":true,"name":"Shanna Li Ching Hollingworth"},{"affiliations":["University of Calgary, Calgary, Canada"],"email":"wj@wjwillett.net","is_corresponding":false,"name":"Wesley Willett"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1007","image_caption":"A screenshot of an early system prototype of a real-time conversation timeline visualized in augmented reality, broken into 10-second chunks of conversation.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Towards Real-Time Speech Segmentation for Glanceable Conversation Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1008","abstract":"Academic literature reviews have traditionally relied on techniques such as keyword searches and accumulation of relevant back-references, using databases like Google Scholar or IEEEXplore. However, both the precision and accuracy of these search techniques are limited by the presence or absence of specific keywords, making literature review akin to searching for needles in a haystack. We present vitaLITy 2, a solution that uses a Large Language Model or LLM-based approach to identify semantically relevant literature in a textual embedding space. We include a corpus of 66,692 papers from 1970-2023 which are searchable through text embeddings created by three language models. vitaLITy 2 contributes a novel Retrieval Augmented Generation (RAG) architecture and can be interacted with through an LLM with augmented prompts, including summarization of a collection of papers. vitaLITy 2 also provides a chat interface that allows users to perform complex queries without learning any new programming language. This also enables users to take advantage of the knowledge captured in the LLM from its enormous training corpus. 
Finally, we demonstrate the applicability of vitaLITy 2 through two usage scenarios.","accessible_pdf":false,"authors":[{"affiliations":["University of Nottingham, Nottingham, United Kingdom"],"email":"psxah15@nottingham.ac.uk","is_corresponding":true,"name":"Hongye An"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"arpitnarechania@gatech.edu","is_corresponding":false,"name":"Arpit Narechania"},{"affiliations":["University of Nottingham, Nottingham, United Kingdom"],"email":"kai.xu@nottingham.ac.uk","is_corresponding":false,"name":"Kai Xu"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1008","image_caption":"The figure shows a diagram of the system architecture of VITALITY 2. VITALITY 2 is an innovative platform aimed at streamlining academic literature search and review. It uses Large Language Models to identify relevant papers, providing a chat interface for natural language queries.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2408.13450","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1008/w-nlviz-1008_Preview.mp4?token=y2MR5-H0oG3Jtsan-bnhnSpndim7GH_XnD9XZ1hb_40&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1008/w-nlviz-1008_Preview.srt?token=AjUOxTsJYV8q8pajUb3Qnf3cG940axo4M5xIqK0jMLA&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"hXf2ythEUrk","session_youtube_ff_link":"https://youtu.be/hXf2ythEUrk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"vitaLITy 2: Reviewing Academic Literature Using Large Language Models","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1009","abstract":"Analyzing and finding anomalies in multi-dimensional datasets is a cumbersome but vital task across different domains. In the context of financial fraud detection, analysts must quickly identify suspicious activity among transactional data. This is an iterative process made of complex exploratory tasks such as recognizing patterns, grouping, and comparing. To mitigate the information overload inherent to these steps, we present a tool combining automated information highlights, Large Language Model generated textual insights, and visual analytics, facilitating exploration at different levels of detail. We perform a segmentation of the data per analysis area and visually represent each one, making use of automated visual cues to signal which require more attention. Upon user selection of an area, our system provides textual and graphical summaries. 
The text, acting as a link between the high-level and detailed views of the chosen segment, allows for a quick understanding of relevant details. A thorough exploration of the data comprising the selection can be done through graphical representations. The feedback gathered in a study performed with seven domain experts suggests our tool effectively supports and guides exploratory analysis, easing the identification of suspicious information.","accessible_pdf":false,"authors":[{"affiliations":["Feedzai, Lisbon, Portugal"],"email":"beatriz.feliciano@feedzai.com","is_corresponding":true,"name":"Beatriz Feliciano"},{"affiliations":["Feedzai, Lisbon, Portugal"],"email":"rita.costa@feedzai.com","is_corresponding":false,"name":"Rita Costa"},{"affiliations":["Feedzai, Porto, Portugal"],"email":"jean.alves@feedzai.com","is_corresponding":false,"name":"Jean Alves"},{"affiliations":["Feedzai, Madrid, Spain"],"email":"javier.liebana@feedzai.com","is_corresponding":false,"name":"Javier Li\u00e9bana"},{"affiliations":["Feedzai, Lisbon, Portugal"],"email":"diogo.duarte@feedzai.com","is_corresponding":false,"name":"Diogo Ramalho Duarte"},{"affiliations":["Feedzai, Lisbon, Portugal"],"email":"pedro.bizarro@feedzai.com","is_corresponding":false,"name":"Pedro Bizarro"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1009","image_caption":"The interface guides the analysis of financial multi-dimensional datasets through multiple levels of detail exploration. It is composed of (A) a region where the alert is segmented in the subgroups that compose it (A.1, A.2, A.3, A.4, A.5, and A.6) and where groups that require more attention (in this case, A.5) are highlighted in red; (B) an automatically generated text summary of a selected area (A.3) that provides a broad understanding of the group; and (C) an interactive graphical representation of all the data points of the selected area to explore information in detail.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1009/w-nlviz-1009_Preview.mp4?token=g34tqFW7hEOO-D0vRVGUskUrSvO9BTa5jnRCboZ1bF4&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1009/w-nlviz-1009_Preview.srt?token=vytwDrOsrTkd45XIC6aDkP-CnIfx3qT1SYSK5I1lwx0&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"ywuG-oB69rs","session_youtube_ff_link":"https://youtu.be/ywuG-oB69rs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"\u201cShow Me What\u2019s Wrong!\u201d: Combining Charts and Text to Guide Data 
Analysis","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1010","abstract":"Dimension reduction (DR) can transform high-dimensional text embeddings into a 2D visual projection facilitating the exploration of document similarities. However, the projection often lacks connection to the text semantics, due to the opaque nature of text embeddings and non-linear dimension reductions. To address these problems, we propose a gradient-based method for visualizing the spatial semantics of dimensionally reduced text embeddings. This method employs gradients to assess the sensitivity of the projected documents with respect to the underlying words. The method can be applied to existing DR algorithms and text embedding models. Using these gradients, we designed a visualization system that incorporates spatial word clouds into the document projection space to illustrate the impactful text features. We further present three usage scenarios that demonstrate the practical applications of our system to facilitate the discovery and interpretation of underlying semantics in text projections.","accessible_pdf":false,"authors":[{"affiliations":["Computer Science, Virginia Tech, Blacksburg, United States"],"email":"wliu3@vt.edu","is_corresponding":true,"name":"Wei Liu"},{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"north@vt.edu","is_corresponding":false,"name":"Chris North"},{"affiliations":["Tulane University, New Orleans, United States"],"email":"rfaust1@tulane.edu","is_corresponding":false,"name":"Rebecca Faust"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1010","image_caption":"Document projection of COVID-19 open research articles with gradient-based word explanations. (Top) A projection from a BERT model fine-tuned based on the data domain, featuring a spatial word cloud that captures the spatial semantics by showing key words that impact the projection. (Bottom) A heatmap of word impacts in a selected document, highlighting the word \"smoking\", which reflects the domain context. 
","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2409.03949","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1010/w-nlviz-1010_Preview.mp4?token=bSHHu08UdZr9ZXWQZxXbwqotHqmKVJqnMcQx3z8Nou8&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"P-20dcQY1wI","session_youtube_ff_link":"https://youtu.be/P-20dcQY1wI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Visualizing Spatial Semantics of Dimensionally Reduced Text Embeddings","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1011","abstract":"Recently, large language models (LLMs) have shown great promise in translating natural language (NL) queries into visualizations, but their \u201cblack-box\u201d nature often limits explainability and debuggability. In response, we present a comprehensive text prompt that, given a tabular dataset and an NL query about the dataset, generates an analytic specification including (detected) data attributes, (inferred) analytic tasks, and (recommended) visualizations. This specification captures key aspects of the query translation process, affording both explainability and debuggability. For instance, it provides mappings from the detected entities to the corresponding phrases in the input query, as well as the specific visual design principles that determined the visualization recommendations. Moreover, unlike prior LLM-based approaches, our prompt supports conversational interaction and ambiguity detection capabilities. 
In this paper, we detail the iterative process of curating our prompt, present a preliminary performance evaluation using GPT-4, and discuss the strengths and limitations of LLMs at various stages of query translation.","accessible_pdf":true,"authors":[{"affiliations":["UNC Charlotte, Charlotte, United States"],"email":"ssah1@uncc.edu","is_corresponding":false,"name":"Subham Sah"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"rmitra34@gatech.edu","is_corresponding":true,"name":"Rishab Mitra"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"arpitnarechania@gatech.edu","is_corresponding":false,"name":"Arpit Narechania"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"endert@gatech.edu","is_corresponding":false,"name":"Alex Endert"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"john.stasko@cc.gatech.edu","is_corresponding":false,"name":"John Stasko"},{"affiliations":["UNC Charlotte, Charlotte, United States"],"email":"wdou1@uncc.edu","is_corresponding":false,"name":"Wenwen Dou"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1011","image_caption":"Figure showing NL4DV-LLM pipeline for Generating Analytic Specifications for Data Visualization from Natural Language Queries using Large Language Models.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2408.13391","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1011/w-nlviz-1011_Preview.mp4?token=NhjPytOjKrz329cylPDzMT_PCDr8y8g0oEUWNDbzGmk&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1011/w-nlviz-1011_Preview.srt?token=9fl8NRyQX_o9vqmKY6hjIx3GswiLz8EEAJbJskYGJpA&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"jF33mGxryrM","session_youtube_ff_link":"https://youtu.be/jF33mGxryrM","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Generating Analytic Specifications for Data Visualization from Natural Language Queries using Large Language Models","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1016","abstract":"We explore how natural language authoring with large language models (LLMs) can support the inline authoring of word-scale visualizations (WSVs). While word-scale visualizations that live alongside and within document text can support rich integration of data into written narratives and communication, these small visualizations have typically been challenging to author. 
We explore how modern LLMs---which are able to generate diverse visualization designs based on simple natural language descriptions---might allow authors to specify and insert new visualizations inline as they write text. Drawing on our experiences with an initial prototype built using GPT-4, we highlight the expressive potential of inline natural language visualization authoring and identify opportunities for further research.","accessible_pdf":true,"authors":[{"affiliations":["University of Calgary, Calgary, Canada"],"email":"paige.sobrien@ucalgary.ca","is_corresponding":true,"name":"Paige So'Brien"},{"affiliations":["University of Calgary, Calgary, Canada"],"email":"wj@wjwillett.net","is_corresponding":false,"name":"Wesley Willett"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1016","image_caption":"This image is a screenshot of an editor application where authors can create and embed word-scale visualizations for text using LLM capabilities. The screenshot of the application includes a text area where authors can add their content. Below the text area there is a search bar for authors to submit plain language instructions for creating a visualization. In the text area, the numbers 1 2 3 4 are highlighted and used to generate a bar chart of the four values displayed inline with the text. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1016/w-nlviz-1016_Preview.mp4?token=uc0UehyL9uXCt3ew8c1Uusat150b8TfAqMhbW79z_hs&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1016/w-nlviz-1016_Preview.srt?token=oUhIwPjdOVxBNz_MOd7pjd2MO9LYuu5jykJxvxYK_Vo&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"xNb6NcY2Rpo","session_youtube_ff_link":"https://youtu.be/xNb6NcY2Rpo","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Towards Inline Natural Language Authoring for Word-Scale Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1019","abstract":"As language models have become increasingly successful at a wide array of tasks, different prompt engineering methods have been developed alongside them in order to adapt these models to new tasks. One of them is Tree-of-Thoughts (ToT), a prompting strategy and framework for language model inference and problem-solving. It allows the model to explore multiple solution paths and select the best course of action, producing a tree-like structure of intermediate steps (i.e., thoughts). This method was shown to be effective for several problem types. 
However, the official implementation has a high barrier to usage as it requires setup overhead and incorporates task-specific problem templates which are difficult to generalize to new problem types. It also does not allow user interaction to improve or suggest new thoughts. We introduce iToT (interactive Tree-of-Thoughts), a generalized and interactive Tree of Thought prompting system. iToT allows users to explore each step of the model\u2019s problem-solving process as well as to correct and extend the model\u2019s thoughts. iToT revolves around a visual interface that facilitates simple and generic ToT usage and makes the problem-solving process transparent to users. This facilitates a better understanding of which thoughts and considerations lead to the model\u2019s final decision. Through two case studies, we demonstrate the usefulness of iToT in different human-LLM co-writing tasks.","accessible_pdf":true,"authors":[{"affiliations":["ETHZ, Zurich, Switzerland"],"email":"aboyle@student.ethz.ch","is_corresponding":false,"name":"Alan David Boyle"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"igupta@ethz.ch","is_corresponding":false,"name":"Isha Gupta"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"shoenig@student.ethz.ch","is_corresponding":false,"name":"Sebastian H\u00f6nig"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"lukas.mautner98@gmail.com","is_corresponding":false,"name":"Lukas Mautner"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"kenza.amara@ai.ethz.ch","is_corresponding":false,"name":"Kenza Amara"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"furui.cheng@inf.ethz.ch","is_corresponding":false,"name":"Furui Cheng"},{"affiliations":["ETH Z\u00fcrich, Z\u00fcrich, Switzerland"],"email":"melassady@ai.ethz.ch","is_corresponding":false,"name":"Mennatallah El-Assady"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1019","image_caption":"We introduce iToT (interactive Tree-of-Thoughts), a generalized and interactive Tree of Thought prompting system. The iToT workflow: During initialization, the user provides an input prompt describing the task, examples of successful sequences of thoughts, and an evaluation prompt with self-evaluation criteria. They also specify the model parameters and visualization settings (1). During the generation process, the parametrized model produces a set of ranked candidate thoughts. The user can expand on these model-generated thoughts or add a new custom thought (2). 
Finally, iToT offers evaluation: thoughts are ranked by the model's self-evaluation and assessed based on their semantic similarity and self-consistency (3).","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2409.00413","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1019/w-nlviz-1019_Preview.mp4?token=nS_Jlckf5IkzLwNdlu5KO-1wQYvURkYMYbBURIvPK_Q&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1019/w-nlviz-1019_Preview.srt?token=x4UYP36Yra1jccXARMK6kkUITOgXtpQqWN-jCYx74fQ&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"hj2FVIZWiSk","session_youtube_ff_link":"https://youtu.be/hj2FVIZWiSk","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"iToT: An Interactive System for Customized Tree-of-Thought Generation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1020","abstract":"Strategy management analyses are created by business consultants with common analysis frameworks (i.e. comparative analyses) and associated diagrams. We show these can be largely constructed using LLMs, starting with the extraction of insights from data, organization of those insights according to a strategy management framework, and then depiction in the typical strategy management diagram for that framework (static textual visualizations). We discuss caveats and future directions to generalize for broader uses.","accessible_pdf":false,"authors":[{"affiliations":["Uncharted Software, Toronto, Canada"],"email":"richard.brath@alumni.utoronto.ca","is_corresponding":true,"name":"Richard Brath"},{"affiliations":["Uncharted Software, Toronto, Canada"],"email":"miltonjbradley@gmail.com","is_corresponding":false,"name":"Adam James Bradley"},{"affiliations":["Uncharted Software, Toronto, Canada"],"email":"david@jonker.work","is_corresponding":false,"name":"David Jonker"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1020","image_caption":"From insight generation to diagram by LLM: 1. The LLM generates insights from data. 2. The LLM organizes insights by a strategy management analysis framework, e.g. Porter\u2019s Five Forces of Value Discipline. 3. 
The LLM generates the corresponding strategy management diagram.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1020/w-nlviz-1020_Preview.mp4?token=C8aMLxtsocipeEwrixc0-JxHGGPSavrm0QgbrcxLsgA&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-nlviz/w-nlviz-1020/w-nlviz-1020_Preview.srt?token=2ZhBfAnDs3zJFFuL8WFB8HZ6wsLp0zz9las4-yAVirQ&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"aefl1VsQPDc","session_youtube_ff_link":"https://youtu.be/aefl1VsQPDc","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Strategic management analysis: from data to strategy diagram by LLM","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1021","abstract":"We present a mixed-methods study to explore how large language models (LLMs) can assist users in the visual exploration and analysis of complex data structures, using knowledge graphs (KGs) as a baseline. We surveyed and interviewed 20 professionals who regularly work with LLMs with the goal of using them for (or alongside) KGs. From the analysis of our interviews, we contribute a preliminary roadmap for the design of LLM-driven visual analysis systems and outline future opportunities in this emergent design space.","accessible_pdf":false,"authors":[{"affiliations":["MIT Lincoln Laboratory, Lexington, United States"],"email":"harry.li@ll.mit.edu","is_corresponding":false,"name":"Harry Li"},{"affiliations":["Tufts University, Medford, United States"],"email":"gabriel.appleby@tufts.edu","is_corresponding":false,"name":"Gabriel Appleby"},{"affiliations":["MIT Lincoln Laboratory, Lexington, United States"],"email":"ashley.suh@ll.mit.edu","is_corresponding":true,"name":"Ashley Suh"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1021","image_caption":"We present a mixed-methods study to explore how large language models (LLMs) can assist users in the visual exploration and analysis of complex data structures, using knowledge graphs (KGs) as a baseline. We surveyed and interviewed 20 professionals who regularly work with LLMs with the goal of using them for (or alongside) KGs. 
From the analysis of our interviews, we contribute a preliminary roadmap for the design of LLM-driven visual analysis systems and outline future opportunities in this emergent design space.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"A Preliminary Roadmap for LLMs as Visual Data Analysis Assistants","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-nlviz-1022","abstract":"This study explores the potential of visual representation in understanding the structural elements of Arabic poetry, a subject of significant educational and research interest. Our objective is to make Arabic poetic works more accessible to readers of both Arabic and non-Arabic linguistic backgrounds by employing visualization, exploration, and analytical techniques. We transformed poetry texts into syllables, identified their metrical structures, segmented verses into patterns, and then converted these patterns into visual representations. Following this, we computed and visualized the dissimilarities between these images, and overlaid their differences. Our findings suggest that the positional patterns across a poem play a pivotal role in effective poetry clustering, as demonstrated by our newly computed metrics. The results of our clustering experiments showed a marked improvement over previous attempts, thereby providing new insights into the composition and structure of Arabic poetry. This study underscored the value of visual representation in enhancing our understanding of Arabic poetry.","accessible_pdf":true,"authors":[{"affiliations":["University of Neuch\u00e2tel, Neuch\u00e2tel, Switzerland"],"email":"abdelmalek.berkani@unine.ch","is_corresponding":true,"name":"Abdelmalek Berkani"},{"affiliations":["University of Neuch\u00e2tel, Neuch\u00e2tel, Switzerland"],"email":"adrian.holzer@unine.ch","is_corresponding":false,"name":"Adrian Holzer"}],"award":"","doi":"","event_id":"w-nlviz","event_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-nlviz-1022","image_caption":"This image illustrates the overlay of structural and color differences between the first 10 lines of two poems, converted into images after detecting the meter and patterns. 
The analysis of these differences led to the calculation of comparison and classification metrics.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop1","session_room":"Bayshore II","session_room_id":"bayshore2","session_title":"NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization","session_uid":"w-nlviz","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["NLVIZ Workshop: Exploring Research Opportunities for Natural Language, Text, and Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Enhancing Arabic Poetic Structure Analysis through Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-1762","abstract":"Weather can have a significant impact on the power grid. Heat and cold waves lead to increased energy use as customers cool or heat their space, while simultaneously hampering energy production as the environment deviates from ideal operating conditions. Extreme heat has previously melted power cables, while extreme cold can cause vital parts of the energy infrastructure to freeze. Utilities have reserves to compensate for the additional energy use, but in extreme cases which fall outside the forecast energy demand, the impact on the power grid can be severe. In this paper, we present an interactive tool to explore the relationship between weather and power outages. We demonstrate its use with the example of the impact of Winter Storm Uri on Texas in February 2021.","accessible_pdf":false,"authors":[{"affiliations":["Institute of Computer Science, Leipzig University, Leipzig, Germany"],"email":"nsonga@informatik.uni-leipzig.de","is_corresponding":true,"name":"Baldwin Nsonga"},{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"andy.berres@gmail.com","is_corresponding":false,"name":"Andy S Berres"},{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"bobby.jeffers@nrel.gov","is_corresponding":false,"name":"Robert Jeffers"},{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"caitlyn.clark6@icloud.com","is_corresponding":false,"name":"Caitlyn Clark"},{"affiliations":["University of Kaiserslautern, Kaiserslautern, Germany"],"email":"hagen@cs.uni-kl.de","is_corresponding":false,"name":"Hans Hagen"},{"affiliations":["Leipzig University, Leipzig, Germany"],"email":"scheuermann@informatik.uni-leipzig.de","is_corresponding":false,"name":"Gerik Scheuermann"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-energyvis-1762","image_caption":"Weather can have a significant impact on the power grid. In this paper, we propose an interactive tool to explore the relationship between weather and power outages. We demonstrate its use with the example of the impact of winter storm Uri on Texas in February 2021. 
While the number of affected customers by county, median temperatures, and unavailable power are shown in juxtaposed timelines for easy temporal comparison, the map view shows the spatial distribution of temperature and outages. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-energyvis/w-energyvis-1762/w-energyvis-1762_Preview.mp4?token=QQ_HHel2WPYLN9wZuayBdQrrivNDAdvyec2pZEtlD3M&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-energyvis/w-energyvis-1762/w-energyvis-1762_Preview.srt?token=QbRAW3fYWlzBPpqEXdBPXQeXb2bg44S4nB1578sd1Og&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"i6cHT3DHCm4","session_youtube_ff_link":"https://youtu.be/i6cHT3DHCm4","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Extreme Weather and the Power Grid: A Case Study of Winter Storm Uri","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-2646","abstract":"With the growing penetration of inverter-based distributed energy resources and increased loads through electrification, power systems analyses are becoming more important and more complex. Moreover, these analyses increasingly involve the combination of interconnected energy domains with data that are spatially and temporally increasing in scale by orders of magnitude, surpassing the capabilities of many existing analysis and decision-support systems. We present the architectural design, development, and application of a high-resolution web-based visualization environment capable of cross-domain analysis of tens of millions of energy assets, focusing on scalability and performance. Our system supports the exploration, navigation, and analysis of large data from diverse domains such as electrical transmission and distribution systems, mobility and electric vehicle charging networks, communications networks, cyber assets, and other supporting infrastructure. 
We evaluate this system across multiple use cases, describing the capabilities and limitations of a web-based approach for high-resolution energy system visualizations.","accessible_pdf":false,"authors":[{"affiliations":["National Renewable Energy Lab, Golden, United States"],"email":"graham.johnson@nrel.gov","is_corresponding":true,"name":"Graham Johnson"},{"affiliations":["National Renewable Energy Lab, Golden, United States"],"email":"sam.molnar@nrel.gov","is_corresponding":false,"name":"Sam Molnar"},{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"nicholas.brunhart-lupo@nrel.gov","is_corresponding":false,"name":"Nicholas Brunhart-Lupo"},{"affiliations":["National Renewable Energy Lab, Golden, United States"],"email":"kenny.gruchalla@nrel.gov","is_corresponding":false,"name":"Kenny Gruchalla"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-energyvis-2646","image_caption":"Image Description: Snapshot of the 100-megapixel high-resolution display with an interactive visualization in the browser. Two synthetic energy model topologies are shown: an electrical transmission system (blue lines) and a corresponding distribution system (orange points) in the San Francisco Bay area. These two models have over 12 million combined features. We discuss the capabilities of different rendering approaches such as vector tiling, aggregation techniques, and efficient binary formats. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Architecture for Web-Based Visualization of Large-Scale Energy Domains","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-2743","abstract":"In the pursuit of achieving net-zero greenhouse gas emissions by 2050, policymakers and researchers require sophisticated tools to explore and compare various climate transition scenarios. This paper introduces the Pathways Explorer, an innovative visualization tool designed to facilitate these comparisons by providing an interactive platform that allows users to select, view, and dissect multiple pathways towards sustainability. Developed in collaboration with the \u00ab\u00a0Institut de l\u2019\u00e9nergie Trottier\u00a0\u00bb (IET), this tool leverages a technoeconomic optimization model to project the energy transformation needed under different constraints and assumptions. We detail the design process that guided the development of the Pathways Explorer, focusing on user-centered design challenges and requirements. 
A case study is presented to demonstrate how the tool has been utilized by stakeholders to make informed decisions, highlighting its impact and effectiveness. The Pathways Explorer not only enhances understanding of complex climate data but also supports strategic planning by providing clear, comparative visualizations of potential future scenarios.","accessible_pdf":false,"authors":[{"affiliations":["Kashika Studio, Montreal, Canada"],"email":"francois.levesque@polymtl.ca","is_corresponding":false,"name":"Fran\u00e7ois L\u00e9vesque"},{"affiliations":["Polytechnique Montreal, Montreal, Canada"],"email":"louis.beaumier@polymtl.ca","is_corresponding":false,"name":"Louis Beaumier"},{"affiliations":["Polytechnique Montreal, Montreal, Canada"],"email":"thomas.hurtut@polymtl.ca","is_corresponding":true,"name":"Thomas Hurtut"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-energyvis-2743","image_caption":"Pathways Explorer allows policymakers and researchers to explore and compare various climate transition scenarios.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Pathways Explorer: Interactive Visualization of Climate Transition Scenarios","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-2845","abstract":"Methane (CH4) leakage monitoring is crucial for environmental protection and regulatory compliance, particularly in the oil and gas industries. Reducing CH4 emissions helps advance green energy by converting it into a valuable energy source through innovative capture technologies. A real-time continuous monitoring system (CMS) is necessary to detect fugitive and intermittent emissions and provide actionable insights. Integrating spatiotemporal data from satellites, airborne sensors, and ground sensors with inventory data and the weather research and forecasting (WRF) model creates a comprehensive dataset, making CMS feasible but posing significant challenges. These challenges include data alignment and fusion, managing heterogeneity, handling missing values, ensuring resolution integrity, and maintaining geometric and radiometric accuracy. This study outlines the procedure for methane leakage detection, addressing challenges at each step and offering solutions through machine learning and data analysis. 
It further details how visual analytics can be implemented to improve the effectiveness of the various aspects of emission monitoring.","accessible_pdf":false,"authors":[{"affiliations":["University of Oklahoma, Norman, United States"],"email":"parisa.masnadi@ou.edu","is_corresponding":true,"name":"Parisa Masnadi Khiabani"},{"affiliations":["University of Oklahoma, Norman, United States"],"email":"danala@ou.edu","is_corresponding":false,"name":"Gopichandh Danala"},{"affiliations":["University of Oklahoma, Norman, United States"],"email":"wolfgang.jentner@uni-konstanz.de","is_corresponding":false,"name":"Wolfgang Jentner"},{"affiliations":["University of Oklahoma, Oklahoma, United States"],"email":"ebert@ou.edu","is_corresponding":false,"name":"David Ebert"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-energyvis-2845","image_caption":"The image shows how integrating top-down and bottom-up approaches for methane leakage detection addresses methodological gaps, enhancing the detection and understanding of emission sources and rates. This integration enables cross-validation, which improves both top-down and bottom-up modeling. Every step contributes to visualization, yet data analysis and visual analytics are not only crucial for providing precise feedback for modeling but also integral in enhancing each step of the process. These tools are key for tackling challenges in data integration, effectively managing information, and uncovering hidden patterns, ensuring continuous improvement across all stages.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Challenges in Data Integration, Monitoring, and Exploration of Methane Emissions: The Role of Data Analysis and Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-3496","abstract":"Transmission System Operators (TSO) often need to integrate multiple sources of information to make decisions in real time.In cases where a single power line goes offline, due to a natural event or scheduled outage, there typically will be a contingency plan that the TSO may utilize to mitigate the situation. In cases where two or more power lines go offline, this contingency plan is no longer valid, and they must re-prepare and reason about the network in real time. A key network property that must be balanced is loadability--the range of permissible voltage levels for a specific bus (or node), understood as a function of power and its active (P) and reactive (Q) components. Loadability provides information of how much more demand a specific node can handle, before system became unstable. 
To increase loadability, the TSO can potentially make control actions that raise or lower P or Q, which results in change the voltage levels required to be within permissible limits. While many methods exist to calculate loadability and represent loadability to end users, there has been little focus on tailoring loadability visualizations to the unique needs of TSOs. In this paper we involve operations domain experts in a human centered design process to prototype two new loadability visualizations for TSOs. We contribute a design paper that yields: (1) a working model of the operator's decision making process, (2) example artifacts of the two data visualization techniques, and (3) a critical qualitative expert review of our designs.","accessible_pdf":false,"authors":[{"affiliations":["Hitachi Energy Research, Montreal, Canada"],"email":"dmarino@cim.mcgill.ca","is_corresponding":true,"name":"David Marino"},{"affiliations":["Carleton University, Ottawa, Canada"],"email":"maxwellkeleher@cmail.carleton.ca","is_corresponding":false,"name":"Maxwell Keleher"},{"affiliations":["Hitachi Energy Research, Krakow, Poland"],"email":"krzysztof.chmielowiec@hitachienergy.com","is_corresponding":false,"name":"Krzysztof Chmielowiec"},{"affiliations":["Hitachi Energy Research, Montreal, Canada"],"email":"antony.hilliard@hitachienergy.com","is_corresponding":false,"name":"Antony Hilliard"},{"affiliations":["Hitachi Energy Research, Krakow, Poland"],"email":"pawel.dawidowski@hitachienergy.com","is_corresponding":false,"name":"Pawel Dawidowski"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-energyvis-3496","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Operator-Centered Design of a Nodal Loadability Network Visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-4135","abstract":"This paper presents a dashboard to find and compare days with similar weather patterns within an 80-year historical weather dataset. The dashboard facilitates the analysis of weather patterns and their impact on renewable energy generation by defining and identifying similar weather days. Users are given the flexibility to select the metric for determining similarity, which includes a combination of temperature, dew point, wind speed, Global Horizontal Irradiance (GHI), Direct Horizontal Irradiance (DHI), and cloud cover. The region for this work is limited to Texas. 
The dashboard then generates an output that compares the selected weather metrics and the corresponding renewable generation outputs.","accessible_pdf":false,"authors":[{"affiliations":["Texas A","M University, College Station, United States"],"email":"sanjanakunkolienkar@tamu.edu","is_corresponding":true,"name":"Sanjana Kunkolienkar"},{"affiliations":["Texas A","M University, College Station, United States"],"email":"nislavch@tamu.edu","is_corresponding":false,"name":"Nikola Slavchev"},{"affiliations":["Texas A","M University , College Station, United States"],"email":"fsafdarian@tamu.edu","is_corresponding":false,"name":"Farnaz Safdarian"},{"affiliations":["Texas A","M University, College Station, United States"],"email":"overbye@tamu.edu","is_corresponding":false,"name":"Thomas Overbye"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-energyvis-4135","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Developing a Dashboard To Enhance Visualization of Similar Historical Weather Patterns and Renewable Energy Generation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-4332","abstract":"The rapid growth of the solar energy industry requires advanced educational tools to train the next generation of engineers and technicians. We present a novel system for situated visualization of photovoltaic (PV) module performance, leveraging a combination of PV simulation, sun-sky position, and head-mounted augmented reality (AR). Our system is guided by four principles of development: simplicity, adaptability, collaboration, and maintainability, realized in six components. 
Users interactively manipulate a physical module's orientation and shading referents with immediate feedback on the module's performance.","accessible_pdf":true,"authors":[{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"nicholas.brunhart-lupo@nrel.gov","is_corresponding":false,"name":"Nicholas Brunhart-Lupo"},{"affiliations":["National Renewable Energy Lab, Golden, United States"],"email":"kenny.gruchalla@nrel.gov","is_corresponding":true,"name":"Kenny Gruchalla"},{"affiliations":["Fort Lewis College, Durango, United States"],"email":"williams_l@fortlewis.edu","is_corresponding":false,"name":"Laurie Williams"},{"affiliations":["Fort Lewis College, Durango, United States"],"email":"selias@fortlewis.edu","is_corresponding":false,"name":"Steve Ellis"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-energyvis-4332","image_caption":"A simulated image showing a photovoltaic module's performance for workforce training. An augmented reality projection overlays simulation results onto a physical panel, depicting power flow with arrow and pipe glyphs. Sunlit cells are highlighted in yellow. Shadowed cells are bypassed by diodes and marked with spheres. The optical tracking marker in the foreground relays the panel\u2019s orientation to the system. Users can tilt or rotate the physical panel, adjust the virtual sun\u2019s position using time and geo-coordinate controls, and add virtual occluding objects to explore panel behavior under various conditions.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Situated Visualization of Photovoltaic Module Performance for Workforce Development","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-5170","abstract":"Scenario studies are a technique for representing a range of possible complex decisions through time, and analyzing the impact of those decisions on future outcomes of interest. It is common to use scenarios as a way to study potential pathways towards future build out and decarbonization of energy systems. The results of these studies are often used by diverse energy system stakeholders \u2014 such as community organizations, power system utilities, and policymakers \u2014 for decision-making using data visualization. However, the role of visualization in facilitating decision-making with energy scenario data is not well understood. 
In this work, we review common visualization designs employed in energy scenario studies and discuss the effectiveness of some of these techniques in facilitating different types of analysis with scenario data.","accessible_pdf":true,"authors":[{"affiliations":["National Renewable Energy Lab, Golden, United States"],"email":"sam.molnar@nrel.gov","is_corresponding":true,"name":"Sam Molnar"},{"affiliations":["National Renewable Energy Lab, Golden, United States"],"email":"kenny.gruchalla@nrel.gov","is_corresponding":false,"name":"Kenny Gruchalla"},{"affiliations":["National Renewable Energy Lab, Golden, United States"],"email":"graham.johnson@nrel.gov","is_corresponding":false,"name":"Graham Johnson"},{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"kristi.potter@nrel.gov","is_corresponding":false,"name":"Kristi Potter"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-energyvis-5170","image_caption":"Two visualizations of renewable site location and capacities for four different energy scenarios. a) Each site has a radar plot where the distance from the center indicates the capacity for the labeled scenario, as shown in the legend. Wind and solar sites are plotted as separate colors (blue and yellow, respectively). b) An aggregated visualization of scenario data where each site is colored according to the number of scenarios it occurs in and the resource type.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Opportunities and Challenges in the Visualization of Energy Scenarios for Decision-Making","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-6102","abstract":"This paper introduces CPIE (Coal Pollution Impact Explorer), a spatiotemporal visual analytic tool developed for interactive visualization of coal pollution impacts. CPIE visualizes electricity-generating units (EGUs) and their contributions to statewide Medicare deaths related to coal PM2.5 emissions. The tool is designed to make scientific findings on the impacts of coal pollution more accessible to the general public and to raise awareness of the associated health risks. 
We present three use cases for CPIE: 1) the overall spatial distribution of all 480 facilities in the United States, their statewide impact on excess deaths, and the overall decreasing trend in deaths associated with coal pollution from 1999 to 2020; 2) the influence of pollution transport, where most deaths associated with the facilities located within the same state and neighboring states but some deaths occur far away; and 3) the effectiveness of intervention regulations, such as installing emissions control devices and shutting down coal facilities, in significantly reducing the number of deaths associated with coal pollution.","accessible_pdf":false,"authors":[{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"sjin86@gatech.edu","is_corresponding":true,"name":"Sichen Jin"},{"affiliations":["George Mason University, Fairfax, United States"],"email":"lhennem@gmu.edu","is_corresponding":false,"name":"Lucas Henneman"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"jessica.roberts@cc.gatech.edu","is_corresponding":false,"name":"Jessica Roberts"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-energyvis-6102","image_caption":"The user interface of CPIE shows the coal pollution impacts when Pennsylvania is selected. It consists of (A) a choropleth map view highlighting facilities in Pennsylvania and showing statewide deaths associated with all facilities in Pennsylvania, (B) a choropleth map displaying the number of deaths in Pennsylvania attributable to facilities in other states, and (C) a stacked line chart showing the changes in deaths associated with all Pennsylvania facilities from 1999 to 2020. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-energyvis/w-energyvis-6102/w-energyvis-6102_Preview.mp4?token=EINVooRDQh8xi0eCB3e_DTaID96kxyaD7YNFSvTZO1E&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-energyvis/w-energyvis-6102/w-energyvis-6102_Preview.srt?token=6t7FqJGJP_zsFQvtp7VhnXedhhpPkGgOMvEluTNJ9fI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"bhNcOjTG8IQ","session_youtube_ff_link":"https://youtu.be/bhNcOjTG8IQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"CPIE: A Spatiotemporal Visual Analytic Tool to Explore the Impact of Coal Pollution","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-9750","abstract":"This paper presents a novel open system, ChatGrid, for easy, intuitive, and interactive geospatial visualization of large-scale transmission networks. 
ChatGrid uses state-of-the-art techniques for geospatial visualization of large networks, including 2.5D map views, animated flows, hierarchical and level-based filtering and aggregation to provide visual information in an easy, cognitive manner. The highlight of ChatGrid is a natural language query based interface powered by a large language model (ChatGPT) that offers a natural and flexible interactive experience whereby users can ask questions and ChatGrid provides responses both in text and visually. This paper discusses the architecture, implementation, design decisions, and usage of large language models for ChatGrid.","accessible_pdf":false,"authors":[{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"sjin86@gatech.edu","is_corresponding":true,"name":"Sichen Jin"},{"affiliations":["Pacific Northwest National Laboratory, Richland, United States"],"email":"shrirang.abhyankar@pnnl.gov","is_corresponding":false,"name":"Shrirang Abhyankar"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-energyvis-9750","image_caption":"ChatGrid interface displaying the visualization and query interface. Queries asked by users are responded through both text and visualization. The vertical bars represent the generation sources that have a remaining capacity greater than 100 MW.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-energyvis/w-energyvis-9750/w-energyvis-9750_Preview.mp4?token=G43Qaxtn7P8w0WvFMF6Rbu2gvhAUgB_s5zRe3DjAHp0&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-energyvis/w-energyvis-9750/w-energyvis-9750_Preview.srt?token=O-gp28W-cRZ9vCDRgQWuEnavIHuHWp2rtLuQc8tryn8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"v_T0stnFeb8","session_youtube_ff_link":"https://youtu.be/v_T0stnFeb8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"ChatGrid: Power Grid Visualization Empowered by a Large Language Model","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-energyvis-9875","abstract":"Large-scale power outages, such as those caused by extreme weather events, have a big impact on human behavior. A short power outage is merely a nuisance for most, and may not change people's locations. An outage that lasts for a few hours can result in spoiled food and medical supplies, and people will have to restock spoiled items. Long outages result in temperatures outside tolerable levels in homes, and may prompt people to acquire supplies, such as generators and gas, or change location. The long outages during Winter Storm Uri in Texas resulted in millions of dollars in property damage due to freezing pipes. This level of damage is expected to result in a sharp increase in supply runs and contractor activity. 
In this paper, we present a tool to explore differences in visiting patterns before, during, and after power outages. It allows to compare different points of interest like medical facilities, grocery stores, hardware stores, and other types of businesses.","accessible_pdf":false,"authors":[{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"andy.berres@gmail.com","is_corresponding":true,"name":"Andy S Berres"},{"affiliations":["Institute of Computer Science, Leipzig University, Leipzig, Germany"],"email":"nsonga@informatik.uni-leipzig.de","is_corresponding":false,"name":"Baldwin Nsonga"},{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"caitlyn.clark6@icloud.com","is_corresponding":false,"name":"Caitlyn Clark"},{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"bobby.jeffers@nrel.gov","is_corresponding":false,"name":"Robert Jeffers"},{"affiliations":["University of Kaiserslautern, Kaiserslautern, Germany"],"email":"hagen@cs.uni-kl.de","is_corresponding":false,"name":"Hans Hagen"},{"affiliations":["Leipzig University, Leipzig, Germany"],"email":"scheuermann@informatik.uni-leipzig.de","is_corresponding":false,"name":"Gerik Scheuermann"}],"award":"","doi":"","event_id":"w-energyvis","event_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-energyvis-9875","image_caption":"We present a visual analysis of the impact of the 2021 Texas Power Crisis on building occupancy in Austin, Texas. In February 2021, Winter Storm Uri caused temperatures to rapidly drop up to 50\u2109/25\u2103 below typical Texas winter temperatures (see comparison on the top left), and due to the isolated nature of the Texas powergrid, there was little room for compensation for the additional load and . The top right shows a heatmap comparison of power outages over time (x-axis) for different Texas counties (y-axis). The red line indicates the threshold for the 10% most affected counties (in the tool itself, hovering reveals more information about the counties and the extent of the outages). The tool provides navigation elements for users to select two timeframes they want to compare. In this case, we chose the 3 days with most intense outages, and an equivalent 3-day window two weeks prior, before the winter storm hit. The bottom shows buildings colored by POI type (for buildings with multiple POI, we chose the type with the highest importance \u2013 shown in the legend on the left). The map in the middle shows increases (green) and decreases (purple) in visits during the storm, compared with pre-storm conditions. The changes in visits/occupancy by POI subtype (colored by POI type) are shown on the bottom right. Large Event Spaces (which served as cold shelters) saw an increase in occupancy that\u2019s just a little over the decrease in occupancy of residential homes, and the visits to correctional facilities dropped dramatically.\u00a0 With the exception of the weather layer, all graphics come from MoVis, an interactive prototype we developed. 
To learn more about the weather impact on the power grid, see our other paper \u201cExtreme Weather and the Power Grid: A Case Study of Winter Storm Uri.\u201d ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-energyvis/w-energyvis-9875/w-energyvis-9875_Preview.mp4?token=tsVlw0ZOeOysdNfQ1Q9FMVlt54O3hFivdbokYCDQRSo&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-energyvis/w-energyvis-9875/w-energyvis-9875_Preview.srt?token=1isu0PYjNMZEIjmSXAlWSh3Xpp2WWat8MM6KQ2Bx4y8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop2","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"EnergyVis 2024: 4th Workshop on Energy Data Visualization","session_uid":"w-energyvis","session_youtube_ff_id":"al9x4utB7ss","session_youtube_ff_link":"https://youtu.be/al9x4utB7ss","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["EnergyVis 2024: 4th Workshop on Energy Data Visualization"],"time_stamp":"2024-10-14T16:00:00Z","title":"Evaluating the Impact of Power Outages on Occupancy Patterns During the 2021 Texas Power Crisis","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1008","abstract":"With the increasing amount of data globally, analyzing and visualizing data are becoming essential skills across various professions. It is important to equip university students with these essential data skills. To learn, design, and develop data visualization, students need knowledge of programming and data science topics. Many university programs lack dedicated data science courses for undergraduate students, making it important to introduce these concepts through integrated courses. However, combining data science and data visualization into one course can be challenging due to the time constraints and the heavy load of learning. In this paper, we discuss the development of teaching data science and data visualization together in one course and share the results of the post-course evaluation survey. From the survey's results, we identified four challenges, including difficulty in learning multiple tools and diverse data science topics, varying proficiency levels with tools and libraries, and selecting and cleaning datasets. We also distilled five opportunities for developing a successful data science and visualization course. 
These opportunities include clarifying the course structure, emphasizing visualization literacy early in the course, updating the course content according to student needs, using large real-world datasets, learning from industry professionals, and promoting collaboration among students.","accessible_pdf":true,"authors":[{"affiliations":["Carleton University, Ottawa, Canada"],"email":"shrihariniramesh@cmail.carleton.ca","is_corresponding":true,"name":"Shri Harini Ramesh"},{"affiliations":["Carleton University, Ottawa, Canada","Bruyere Research Institute, Ottawa, Canada"],"email":"fateme.rajabiyazdi@carleton.ca","is_corresponding":false,"name":"Fateme Rajabiyazdi"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1008","image_caption":"Challenges and Opportunities of Teaching Data Visualization Together with Data Science","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2409.05969","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/CGfeWajdPXw&t=0h47m22s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3a","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)","session_uid":"w-eduvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/CGfeWajdPXw&t=0h47m22s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)"],"time_stamp":"2024-10-13T13:10:00Z","title":"Challenges and Opportunities of Teaching Data Visualization Together with Data Science","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1013","abstract":"Academic advising can positively impact struggling students' success. We developed AdVizor, a data-driven learning analytics tool for academic risk prediction for advisors. Our system is equipped with a random forest model for grade prediction probabilities uses a visualization dashboard to allows advisors to interpret model predictions. We evaluated our system in mock advising sessions with academic advisors and undergraduate students at our university. Results show that the system can easily integrate into the existing advising workflow, and visualizations of model outputs can be learned through short training sessions. AdVizor supports and complements the existing expertise of the advisor while helping to facilitate advisor-student discussion and analysis. Advisors found the system assisted them in guiding student course selection for the upcoming semester. It allowed them to guide students to prioritize the most critical and impactful courses. Both advisors and students perceived the system positively and were interested in using the system in the future. 
Our results encourage the development of intelligent advising systems in higher education, catered for advisors.","accessible_pdf":false,"authors":[{"affiliations":["Ontario Tech University, Oshawa, Canada"],"email":"riley.weagant@ontariotechu.net","is_corresponding":false,"name":"Riley Weagant"},{"affiliations":["Ontario Tech University, Oshawa, Canada"],"email":"zixin.zhao@ontariotechu.net","is_corresponding":true,"name":"Zixin Zhao"},{"affiliations":["Ontario Tech University, Oshawa, Canada"],"email":"abradley@uncharted.software","is_corresponding":false,"name":"Adam Badley"},{"affiliations":["Ontario Tech University, Oshawa, Canada"],"email":"christopher.collins@ontariotechu.ca","is_corresponding":false,"name":"Christopher Collins"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1013","image_caption":"Figure of a student and academic advisor sitting across from each other with a computer screen between them, on top is a zoomed out image of the AdVizor interface.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/XeytaUH5Z8c&t=0h28m39s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-eduvis/w-eduvis-1013/w-eduvis-1013_Preview.mp4?token=MEibWd4aMD5VjMhdpUG36Mlq-IT4gyQHec2_Pzkw40I&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-eduvis/w-eduvis-1013/w-eduvis-1013_Preview.srt?token=QZwYkdAHRWd5UxGYaX72aY20X_jhmYmrtp3uAB2V54w&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3a","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)","session_uid":"w-eduvis","session_youtube_ff_id":"0srC2ClVQTY","session_youtube_ff_link":"https://youtu.be/0srC2ClVQTY","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/XeytaUH5Z8c&t=0h28m39s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)"],"time_stamp":"2024-10-13T13:10:00Z","title":"AdVizor: Using Visual Explanations to Guide Data-Driven Student Advising","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1020","abstract":"In this paper, we discuss our experiences advancing a professional-oriented graduate program in Cartography & GIScience at the University of Wisconsin-Madison to account for fundamental shifts in conceptual framings, rapidly evolving mapping technologies, and diverse student needs. We focus our attention on considerations for the cartography curriculum given its relevance to (geo)visualization education and map literacy. 
We reflect on challenges associated with, and lessons learned from, developing a comprehensive and cohesive cartography curriculum across in-person and online learning modalities for a wide range of professional student audiences.","accessible_pdf":true,"authors":[{"affiliations":["University of Wisconsin-Madison, Madison, United States"],"email":"jknelson3@wisc.edu","is_corresponding":true,"name":"Jonathan Nelson"},{"affiliations":["University of Wisconsin-Madison, Madison, United States"],"email":"limpisathian@wisc.edu","is_corresponding":false,"name":"P. William Limpisathian"},{"affiliations":["University of Wisconsin-Madison, Madison, United States"],"email":"reroth@wisc.edu","is_corresponding":false,"name":"Robert Roth"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1020","image_caption":"Developing and maintaining a robust cartography curriculum is challenging yet essential for meeting the needs of the professional cartographer. The cartography curriculum at the University of Wisconsin-Madison (2024-25) is organized within a conceptual framework, consisting of an orthogonal pair of axes to capture both the traditional distinction between mapmaking and map use and the more contemporary distinction between cartographic representation and interaction. The curriculum is collaboratively developed, conceptually-grounded, technologically diverse, and integrated with open educational resources to ensure it remains current, relevant, and synchronized across in-person/online learning modalities.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/CGfeWajdPXw&t=0h57m54s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3a","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)","session_uid":"w-eduvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/CGfeWajdPXw&t=0h57m54s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)"],"time_stamp":"2024-10-13T13:10:00Z","title":"Developing a Robust Cartography Curriculum to Train the Professional Cartographer","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1025","abstract":"Systems thinking is fundamental for understanding complex problems. Addressing twenty-first century challenges like climate change requires comprehending how different components of Earth systems influence each other. The carbon cycle, crucial to our planet\u2019s climate system, is a powerful context for helping the rising generation develop systems thinking skills. Traditional 2-D static images often fail to convey the complexities of the carbon cycle, making it challenging for learners. These representations do not communicate dynamic features of the carbon cycle, such as its multiple scales and interconnected processes. 
We hypothesize that interactive visualization can aid learning by enabling dynamic exploration and consideration of human impacts, thereby fostering systems thinking. ","accessible_pdf":false,"authors":[{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"mina.mani@liu.se","is_corresponding":true,"name":"Mina Mani"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"konrad.schonborn@liu.se","is_corresponding":false,"name":"Konrad J Sch\u00f6nborn"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-eduvis-1025","image_caption":"","keywords":[],"open_access_supplemental_link":"https://nightingaledvs.com/tracing-carbon-visualization-for-systems-thinking/","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/CGfeWajdPXw&t=1h10m22s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3a","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)","session_uid":"w-eduvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/CGfeWajdPXw&t=1h10m22s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)"],"time_stamp":"2024-10-13T13:10:00Z","title":"Tracing Carbon: Visualization for Systems Thinking","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1018","abstract":"In this article, we discuss an experience with design and situated learning in the Creative Data Visualization course, part of the Visual Communication Design undergraduate program at the Federal University of Rio de Janeiro, a free, public Brazilian university that, thanks to affirmative action policies, has become more inclusive over the years. We begin with a brief introduction to the terms Situated Knowledge, coined by Donna Haraway, Situated Design, based on the former concept, and Situated Learning. We then examine the similarities and differences between these notions and the term Situated Visualization to present a model for the concept of Situated Learning in Information Visualization. Following this foundation, we describe the applied methodology, emphasizing the importance of integrating real-world contexts into students\u2019 projects. As a case study, we present three student projects produced as final assignments for the course. 
Through this article, we aim to underscore the articulation of situated design concepts in information visualization activities and contribute to teaching and learning practices in this field, particularly within the Global South.","accessible_pdf":false,"authors":[{"affiliations":["Universidade Federal do Rio de Janeiro, Rio de Janeiro, Brazil"],"email":"doriskos@eba.ufrj.br","is_corresponding":false,"name":"Doris Kosminsky"},{"affiliations":["Federal University of Rio de Janeiro, Rio de Janeiro, Brazil"],"email":"renata.perim@ufrj.br","is_corresponding":false,"name":"Renata Perim Lopes"},{"affiliations":["UFRJ, RJ, Brazil","IBGE, RJ, Brazil"],"email":"regina.reznik@ufrj.br","is_corresponding":false,"name":"Regina Reznik"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1018","image_caption":"The image displays a diagram on the left side of the page, featuring four nested circles, each symbolizing a stage in the Situated Learning model for information visualization. The outermost circle is labeled \"situated contexts,\" linked to \"location,\" covering space, time, place, activity, and social aspects. The second circle, \"collecting data,\" is connected to \"embodied skills.\" The third circle, \"mapping & design\", also links to \"embodied skills.\" The innermost circle is \"presentation,\" linked to \"partial view.\" The right side shows the VIS2024 conference logo.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/CGfeWajdPXw&t=1h18m42s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-eduvis/w-eduvis-1018/w-eduvis-1018_Preview.mp4?token=0BoqkCys97DIKZNWWtFaOEk1WrA-Y6I3NAXl2h8L-Vs&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-eduvis/w-eduvis-1018/w-eduvis-1018_Preview.srt?token=OeV-pHQ_GBKtBOLIFN4Aaneggy3pcOXM9z1rOXRlxQ8&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3a","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)","session_uid":"w-eduvis","session_youtube_ff_id":"-jQLve3cCL8","session_youtube_ff_link":"https://youtu.be/-jQLve3cCL8","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/CGfeWajdPXw&t=1h18m42s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)"],"time_stamp":"2024-10-13T14:15:00Z","title":"Teaching Information Visualization through Situated Design: Case Studies from the Classroom","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1028","abstract":"With the decreasing cost of consumer display technologies making it easier for universities to have larger displays in classrooms, and the ubiquitous use of online tools such as collaborative whiteboards for remote learning during the COVID-19 pandemic, combining the two can be useful in higher education. 
This is especially true in visually intensive classes, such as data visualization courses, that can benefit from additional \"space to teach,\" coined after the \"space to think\" sense-making idiom. In this paper, we reflect on our approach to using SAGE3, a collaborative whiteboard with advanced features, in higher education to teach visually intensive classes, provide examples of activities from our own visually-intensive courses, and present student feedback. We gather our observations into usage patterns for using content-rich canvases in education.","accessible_pdf":false,"authors":[{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"jessemh@vt.edu","is_corresponding":true,"name":"Jesse Harden"},{"affiliations":["University of Hawaii at Manoa, Honolulu, United States"],"email":"nuritk@hawaii.edu","is_corresponding":false,"name":"Nurit Kirshenbaum"},{"affiliations":["University of Hawaii at Manoa, Honolulu, United States"],"email":"tabalbar@hawaii.edu","is_corresponding":false,"name":"Roderick S Tabalba Jr."},{"affiliations":["University of Hawaii at Manoa, Honolulu, United States"],"email":"rtheriot@hawaii.edu","is_corresponding":false,"name":"Ryan Theriot"},{"affiliations":["The University of Hawai'i at M\u0101noa, Honolulu, United States"],"email":"mlr2010@hawaii.edu","is_corresponding":false,"name":"Michael L. Rogers"},{"affiliations":["University of Hawaii at Manoa, Honolulu, United States"],"email":"mahdi@hawaii.edu","is_corresponding":false,"name":"Mahdi Belcaid"},{"affiliations":["Virginia Tech, Blacksburg, United States"],"email":"north@vt.edu","is_corresponding":false,"name":"Chris North"},{"affiliations":["University of Illinois at Chicago, Chicago, United States"],"email":"renambot@uic.edu","is_corresponding":false,"name":"Luc Renambot"},{"affiliations":["University of Illinois at Chicago, Chicago, United States"],"email":"llong4@uic.edu","is_corresponding":false,"name":"Lance Long"},{"affiliations":["University of Illinois Chicago, Chicago, United States"],"email":"ajohnson@uic.edu","is_corresponding":false,"name":"Andrew E Johnson"},{"affiliations":["University of Hawaii at Manoa, Honolulu, United States"],"email":"leighj@hawaii.edu","is_corresponding":false,"name":"Jason Leigh"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1028","image_caption":"A professor using an online whiteboard, SAGE3, for an in-person class with a very large display. 
On the online whiteboard are multiple slides of PowerPoint slide decks, saved as PDFs, and various sticky notes from student contributions.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/CGfeWajdPXw&t=1h30m26s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3a","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)","session_uid":"w-eduvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/CGfeWajdPXw&t=1h30m26s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)"],"time_stamp":"2024-10-13T14:15:00Z","title":"Space to Teach: Content-Rich Canvases for Visually-Intensive Education","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1029","abstract":"Data-art blends visualisation, data science, and artistic expression. It allows people to transform information and data into exciting and interesting visual narratives.Hosting a public data-art hands-on workshop enables participants to engage with data and learn fundamental visualisation techniques. However, being a public event, it presents a range of challenges. We outline our approach to organising and conducting a public workshop, that caters to a wide age range, from children to adults. We divide the tutorial into three sections, focusing on data, sketching skills and visualisation. We place emphasis on public engagement, and ensure that participants have fun while learning new skills.","accessible_pdf":true,"authors":[{"affiliations":["Bangor University, Bangor, United Kingdom"],"email":"j.c.roberts@bangor.ac.uk","is_corresponding":true,"name":"Jonathan C Roberts"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1029","image_caption":"Data-art blends visualisation, data science, and artistic expression. We outline our approach to organising and conducting a public workshop, that caters to a wide age range. 
We divide the tutorial into three sections, focusing on data, sketching skills and visualisation.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2408.04750","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/CGfeWajdPXw&t=1h43m12s","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-eduvis/w-eduvis-1029/w-eduvis-1029_Preview.mp4?token=IFMi0sw4B2ZjhXBaG5uL5V7PS9ZDz27LtpdpwNjm7SQ&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-eduvis/w-eduvis-1029/w-eduvis-1029_Preview.srt?token=IORjrd025DzhF-yHbmSCWdQlImmmgO71TFEjP3jCyNI&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3a","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)","session_uid":"w-eduvis","session_youtube_ff_id":"qO_Uj50TocQ","session_youtube_ff_link":"https://youtu.be/qO_Uj50TocQ","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/CGfeWajdPXw&t=1h43m12s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)"],"time_stamp":"2024-10-13T14:15:00Z","title":"Engaging Data-Art: Conducting a Public Hands-On Workshop","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1026","abstract":"For over half a century, science centers have been key in communicating science, aiming to increase interest and curiosity in STEM, and promote lifelong learning. Science centers integrate interactive technologies like dome displays, touch tables, VR and AR for immersive learning. Visitors can explore complex phenomena, such as conducting a virtual autopsy. Also, the shift towards digitally interactive exhibits has expanded science centers beyond physical locations to virtual spaces, extending their reach into classrooms. Our investigation revealed several key factors for impactful school visits involving interactive data visualization such as full-dome movies, provide unique perspectives about vast and microscopic phenomena. Hands-on discovery allows pupils to manipulate and investigate data, leading to deeper engagement. Collaborative interaction fosters active learning through group participation. Additionally, clear curriculum connections ensure that visits are pedagogically meaningful. We propose a three-stage model for school visits. The \"Experience\" stage involves immersive visual experiences to spark interest. The \"Engagement\" stage builds on this by providing hands-on interaction with data visualization exhibits. The \"Applicate\" stage offers opportunities to apply and create using data visualization. 
A future goal of the model is to broaden STEM reach, enabling pupils to benefit from data visualization experiences even if they cannot visit centers.","accessible_pdf":false,"authors":[{"affiliations":["Link\u00f6ping university, Norrk\u00f6ping, Sweden"],"email":"andreas.c.goransson@liu.se","is_corresponding":true,"name":"Andreas G\u00f6ransson"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"konrad.schonborn@liu.se","is_corresponding":false,"name":"Konrad J Sch\u00f6nborn"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1026","image_caption":"Example of digital science center environment at Norrk\u00f6ping Visualization Center C, Sweden. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/CGfeWajdPXw&t=1h57m2s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3a","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)","session_uid":"w-eduvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/CGfeWajdPXw&t=1h57m2s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)"],"time_stamp":"2024-10-13T14:15:00Z","title":"What makes school visits to digital science centers successful?","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1030","abstract":"We propose to leverage the recent development in Large Language Models, in combination to data visualization software and devices in science centers and schools in order to foster more personalized learning experiences. The main goal with our endeavour is to provide to pupils and visitors the same experience they would get with a professional facilitator when interacting with data visualizations of complex scientific phenomena. 
We describe the results from our early prototypes and the intended implementation and testing of our idea.","accessible_pdf":false,"authors":[{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"lonni.besancon@gmail.com","is_corresponding":false,"name":"Lonni Besan\u00e7on"},{"affiliations":["LiU Link\u00f6ping Universitet, Norrk\u00f6ping, Sweden"],"email":"mathis.brossier@liu.se","is_corresponding":true,"name":"Mathis Brossier"},{"affiliations":["King Abdullah University of Science and Technology, Thuwal, Saudi Arabia"],"email":"omar.mena@kaust.edu.sa","is_corresponding":false,"name":"Omar Mena"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"erik.sunden@liu.se","is_corresponding":false,"name":"Erik Sund\u00e9n"},{"affiliations":["Link\u00f6ping university, Norrk\u00f6ping, Sweden"],"email":"andreas.c.goransson@liu.se","is_corresponding":false,"name":"Andreas G\u00f6ransson"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"anders.ynnerman@liu.se","is_corresponding":false,"name":"Anders Ynnerman"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"konrad.schonborn@liu.se","is_corresponding":false,"name":"Konrad J Sch\u00f6nborn"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1030","image_caption":"The portable globe that we aim to bring to schools so that students can directly ask questions to it.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/CGfeWajdPXw&t=2h6m34s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3a","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)","session_uid":"w-eduvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/CGfeWajdPXw&t=2h6m34s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 1)"],"time_stamp":"2024-10-13T16:00:00Z","title":"TellUs \u2013 Leveraging the power of LLMs with visualization to benefit science centers.","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1031","abstract":"In this reflective essay, we explore how educational science can be relevant for visualization research, addressing beneficial intersections between the two communities. While visualization has become integral to various areas, including education, our own ongoing collaboration has induced reflections and discussions we believe could benefit visualization research. 
In particular, we identify five key perspectives: surpassing traditional evaluation metrics by incorporating established educational measures; defining constructs based on existing learning and educational research frameworks; applying established cognitive theories to understand interpretation and interaction with visualizations; establishing uniform terminology across disciplines; and, fostering interdisciplinary convergence. We argue that by integrating educational research constructs, methodologies, and theories, visualization research can further pursue ecological validity and thereby improve the design and evaluation of visual tools. Our essay emphasizes the potential of intensified and systematic collaborations between educational scientists and visualization researchers to advance both fields, and in doing so craft visualization systems that support comprehension, retention, transfer, and critical thinking. We argue that this reflective essay serves as a first point of departure for initiating dialogue that, we hope, could help further connect educational science and visualization, by proposing future empirical studies that take advantage of interdisciplinary approaches of mutual gain to both communities.","accessible_pdf":false,"authors":[{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"konrad.schonborn@liu.se","is_corresponding":false,"name":"Konrad J Sch\u00f6nborn"},{"affiliations":["Link\u00f6ping University, Norrk\u00f6ping, Sweden"],"email":"lonni.besancon@gmail.com","is_corresponding":false,"name":"Lonni Besan\u00e7on"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1031","image_caption":"In this reflective essay, we explore how educational science can be relevant for visualization research, addressing beneficial intersections between the two communities.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://osf.io/preprints/osf/8jbmz","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/XeytaUH5Z8c&t=0h1m14s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3b","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 2)","session_uid":"w-eduvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/XeytaUH5Z8c&t=0h1m14s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 2)"],"time_stamp":"2024-10-13T16:00:00Z","title":"What Can Educational Science Offer Visualization? A Reflective Essay","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1027","abstract":"Parallel coordinate plots (PCPs) are gaining popularity in data exploration, statistical analysis, predictive analysis along with for data-driven storytelling. In this paper, we present the results of a post-hoc analysis of a dataset from a PCP literacy intervention to identify barriers to PCP literacy. 
We analyzed question responses and inductively identified barriers to PCP literacy. We performed group coding on each individual response and identified new barriers to PCP literacy. Based on our analysis, we present a extended and enhanced list of barriers to PCP literacy. Our findings have implications towards educational interventions targeting PCP literacy and can provide an approach for students to learn about PCPs through active learning.","accessible_pdf":false,"authors":[{"affiliations":["University of San Francisco, San Francisco, United States"],"email":"csrinivas2@dons.usfca.edu","is_corresponding":false,"name":"Chandana Srinivas"},{"affiliations":["Cukurova University, Adana, Turkey"],"email":"elifemelfirat@gmail.com","is_corresponding":false,"name":"Elif E. Firat"},{"affiliations":["University of Nottingham, Nottingham, United Kingdom"],"email":"robert.laramee@nottingham.ac.uk","is_corresponding":false,"name":"Robert S. Laramee"},{"affiliations":["University of San Francisco, San Francisco, United States"],"email":"apjoshi@usfca.edu","is_corresponding":true,"name":"Alark Joshi"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1027","image_caption":"This figure shows the methodology used to inductively identify an enhanced list of PCP literacy barriers.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://www.cs.usfca.edu/~apjoshi/papers/2024_EduVis_PCP_Barriers.pdf","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/XeytaUH5Z8c&t=0h14m20s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3b","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 2)","session_uid":"w-eduvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/XeytaUH5Z8c&t=0h14m20s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 2)"],"time_stamp":"2024-10-13T16:00:00Z","title":"An Inductive Approach for Identification of Barriers to PCP Literacy","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1010","abstract":"This report examines the implementation of the Solution Framework in a social impact project facilitated by VizForSocialGood. It outlines the data visualization process, detailing each stage and offering practical insights. 
The framework's application demonstrates its effectiveness in enhancing project quality, efficiency, and collaboration, making it a valuable tool for educational and professional environments.","accessible_pdf":false,"authors":[{"affiliations":["Independent Information Designer, Medellin, Colombia","Independent Information Designer, Medellin, Colombia"],"email":"munozdataviz@gmail.com","is_corresponding":false,"name":"Victor Mu\u00f1oz"},{"affiliations":["Corporate Information Designer, Arlington Hts, United States","Corporate Information Designer, Arlington Hts, United States"],"email":"hellokevinford@gmail.com","is_corresponding":false,"name":"Kevin Ford"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1010","image_caption":"This image contains chat logs of the interaction between a mentor and a mentee, implementing the Solution Framework in a social impact project. The conversations reflect collaboration and guidance in refining a data visualization, providing a practical model for practitioners to document their workflows and mentoring strategies.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/XeytaUH5Z8c&t=0h42m18s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3b","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 2)","session_uid":"w-eduvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/XeytaUH5Z8c&t=0h42m18s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 2)"],"time_stamp":"2024-10-13T16:00:00Z","title":"Implementing the Solution Framework in a Social Impact Project","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-eduvis-1007","abstract":"Visualizations are a critical medium not only for telling stories, but for fostering exploration. But while there are countless examples of how to use visualizations for \u201cstorytelling with data,\u201d there are few guidelines on how to design visualizations for public exploration. This educator report draws on decades of work in science museums, a public context focused on designing interactive experiences for exploration, to provide evidence-based guidelines for designing exploratory visualizations. Recent studies on interactive visualizations in museums are contextualized within a larger body of museum research on designs that support exploratory learning in interactive exhibits. Synthesizing these studies highlights that to create successful exploratory visualizations, designers can apply long-standing guidelines from exhibit design but need to provide more aids for interpretation.","accessible_pdf":false,"authors":[{"affiliations":["Science Communication Lab, Berkeley, United States","University of California, San Francisco, San Francisco, United States"],"email":"jafrazier@gmail.com","is_corresponding":true,"name":"Jennifer 
Frazier"}],"award":"","doi":"","event_id":"w-eduvis","event_title":"EduVis: Workshop on Visualization Education, Literacy, and Activities","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-eduvis-1007","image_caption":"Museums visitors using an interactive visualization at the Exploratorium (image credit: Amy Snyder).","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"https://youtu.be/XeytaUH5Z8c&t=0h48m37s","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop3b","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 2)","session_uid":"w-eduvis","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"https://youtu.be/XeytaUH5Z8c&t=0h48m37s","sessions":["EduVis: 2nd IEEE VIS Workshop on Visualization Education, Literacy, and Activities (Session 2)"],"time_stamp":"2024-10-13T16:00:00Z","title":"Beyond storytelling with data: Guidelines for designing exploratory visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-pdav-1006","abstract":"The healthcare system collects extensive data, encompassing patient administrative information, clinical measurements, and home-monitored health metrics. To support informed decision-making in patient care and treatment management, it is essential to review and analyze these diverse data sources. Data visualization is a promising solution to navigate healthcare datasets, uncover hidden patterns, and derive actionable insights. However, the process of creating interactive data visualization can be rather challenging due to the size and complexity of these datasets. Progressive data science offers a potential solution, enabling interaction with intermediate results during data exploration. In this paper, we reflect on our experiences with three health data visualization projects employing a progressive data science approach. We explore the practical implications and challenges faced at various stages, including data selection, pre-processing, data mining, transformation, and interpretation and evaluation.We highlighted unique challenges and opportunities for three projects, including visualizing surgical outcomes, tracking patient bed transfers, and integrating patient-generated data visualizations into the healthcare setting.We identified the following challenges: inconsistent data collection practices, the complexity of adapting to varying data completeness levels, and the need to modify designs for real-world deployment. 
Our findings underscore the need for careful consideration of using a progressive data science approach when designing visualizations for healthcare settings.","accessible_pdf":false,"authors":[{"affiliations":["Carleton University, Ottawa, Canada"],"email":"faisalzaki@cmail.carleton.ca","is_corresponding":false,"name":"Faisal Zaki Roshan"},{"affiliations":["Carleton University, Ottawa, Canada"],"email":"abhishekahuja@cmail.carleton.ca","is_corresponding":false,"name":"Abhishek Ahuja"},{"affiliations":["Carleton University, Ottawa, Canada"],"email":"fateme.rajabiyazdi@carleton.ca","is_corresponding":true,"name":"Fateme Rajabiyazdi"}],"award":"","doi":"","event_id":"w-pdav","event_title":"Progressive Data Analysis and Visualization (PDAV) Workshop.","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-pdav-1006","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop4","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Progressive Data Analysis and Visualization (PDAV) Workshop","session_uid":"w-pdav","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Progressive Data Analysis and Visualization (PDAV) Workshop"],"time_stamp":"2024-10-14T12:30:00Z","title":"Practical Challenges of Progressive Data Science in Healthcare","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-pdav-1009","abstract":"In a world where data has become too large for direct human perception, scientists have developed methods for specific data exploration. Until recently, two main methodologies were used for their exploration: scientific visualization (SciVis) for data with inherent geometry (simulation/acquisition) and information visualization (InfoVis) for abstract data. Though these fields evolved in parallel, sharing journals and conferences, they had distinct challenges, methodologies, and experts. Recently, a visible transition has begun, with the two communities converging, exemplified by IEEE VIS conference removing distinct categories. 
In this context, we propose a high-level discussion on an open-source framework widely used in SciVis and how progressive processing and visualization could help bringing its abilities to InfoVis.","accessible_pdf":false,"authors":[{"affiliations":["Kitware SAS, Lyon, France"],"email":"charles.gueunet@kitware.com","is_corresponding":true,"name":"Charles Gueunet"},{"affiliations":["Kitware Europe, Villeurbanne, France"],"email":"francois.mazen@kitware.com","is_corresponding":false,"name":"Fran\u00e7ois Mazen"}],"award":"","doi":"","event_id":"w-pdav","event_title":"Progressive Data Analysis and Visualization (PDAV) Workshop.","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-pdav-1009","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop4","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Progressive Data Analysis and Visualization (PDAV) Workshop","session_uid":"w-pdav","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Progressive Data Analysis and Visualization (PDAV) Workshop"],"time_stamp":"2024-10-14T12:30:00Z","title":"Towards a Progressive Open Source Framework for SciVis and InfoVis","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-pdav-1010","abstract":"Progressive dimensionality reduction algorithms allow for visually investigating intermediate results, especially for large data sets. While different algorithms exist that progressively increase the number of data points, we propose an algorithm that allows for increasing the number of dimensions. Especially in spatio-temporal data, where each spatial location can be seen as one data point and each time step as one dimension, the data is often stored in a format that supports quick access to the individual dimensions of all points. Therefore, we propose Progressive Glimmer, a progressive multidimensional scaling (MDS) algorithm. We adapt the Glimmer algorithm to support progressive updates for changes in the data's dimensionality. We evaluate Progressive Glimmer's embedding quality and runtime. We observe that the algorithm provides more stable results, leading to visually consistent results for progressive rendering and making the approach applicable to streaming data. 
We show the applicability of our approach to spatio-temporal simulation ensemble data where we add the individual ensemble members progressively.","accessible_pdf":false,"authors":[{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"m_ever14@uni-muenster.de","is_corresponding":true,"name":"Marina Evers"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"david.haegele@visus.uni-stuttgart.de","is_corresponding":false,"name":"David H\u00e4gele"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"st142532@stud.uni-stuttgart.de","is_corresponding":false,"name":"S\u00f6ren D\u00f6ring"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"weiskopf@visus.uni-stuttgart.de","is_corresponding":false,"name":"Daniel Weiskopf"}],"award":"","doi":"","event_id":"w-pdav","event_title":"Progressive Data Analysis and Visualization (PDAV) Workshop.","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-pdav-1010","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop4","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Progressive Data Analysis and Visualization (PDAV) Workshop","session_uid":"w-pdav","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Progressive Data Analysis and Visualization (PDAV) Workshop"],"time_stamp":"2024-10-14T12:30:00Z","title":"Progressive Glimmer: Expanding Dimensionality in Multidimensional Scaling","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1007","abstract":"Symmetric second-order tensors are fundamental in various scientific and engineering domains, as they can represent properties such as material stresses or diffusion processes in brain tissue. In recent years, several approaches have been introduced and improved to analyze these fields using topological features, such as degenerate tensor locations, i.e., the tensor has repeated eigenvalues, or normal surfaces. Traditionally, the identification of such features has been limited to single tensor fields. However, it has become common to create ensembles to account for uncertainties and variability in simulations and measurements. In this work, we explore novel methods for describing and visualizing degenerate tensor locations in 3D symmetric second-order tensor field ensembles. We base our considerations on the tensor mode and analyze its practicality in characterizing the uncertainty of degenerate tensor locations before proposing a variety of visualization strategies to effectively communicate degenerate tensor information. 
We demonstrate our techniques for synthetic and simulation data sets. The results indicate that the interplay of different descriptions for uncertainty can effectively convey information on degenerate tensor locations.","accessible_pdf":true,"authors":[{"affiliations":["University of Cologne, Cologne, Germany"],"email":"tadea.schmitz@uni-koeln.de","is_corresponding":false,"name":"Tadea Schmitz"},{"affiliations":["RWTH Aachen University, Aachen, Germany"],"email":"gerrits@vis.rwth-aachen.de","is_corresponding":true,"name":"Tim Gerrits"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1007","image_caption":"Uncertainty visualizations for eight simulation results describing stresses in an O-ring with varying anisotropy parameter. The degenerate tensor lines of all ensemble members are shown in green, while the color-coded meanLine shows the locations of degenerate tensors within the mean tensor field and standard deviation of mode values. The yellow probabilityBand indicates locations where mode values have a probability of 25% of being larger than or equal to 0.99.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2408.08099","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-uncertainty/w-uncertainty-1007/w-uncertainty-1007_Preview.mp4?token=8rfUf11SCsATf7UzC6-xDYLlbQCurLQyjEYw9YyHGNI&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"Fw4FjoRpBtE","session_youtube_ff_link":"https://youtu.be/Fw4FjoRpBtE","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"Exploring Uncertainty Visualization for Degenerate Tensors in 3D Symmetric Second-Order Tensor Field Ensembles","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1017","abstract":"Uncertainty visualization is a key component in translating important insights from ensemble data into actionable decision-making by visually conveying various aspects of uncertainty within a system. With the recent advent of fast surrogate models for computationally expensive simulations, users can interact with more aspects of data spaces than ever before. However, the integration of ensemble data with surrogate models in a decision-making tool brings up new challenges for uncertainty visualization, namely how to reconcile and communicate the new and different types of uncertainties brought in by surrogates and how to utilize these new data estimates in actionable ways. 
In this work, we examine these issues as they relate to high-dimensional data visualization, the integration of discrete datasets and the continuous representations of those datasets, and the unique difficulties associated with systems that allow users to iterate between input and output spaces. We assess the role of uncertainty visualization in facilitating intuitive and actionable interaction with ensemble data and surrogate models, and highlight key challenges in this new frontier of computational simulation.","accessible_pdf":true,"authors":[{"affiliations":["National Renewable Energy Lab, Golden, United States"],"email":"sam.molnar@nrel.gov","is_corresponding":true,"name":"Sam Molnar"},{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"jd.laurencechasen@nrel.gov","is_corresponding":false,"name":"J.D. Laurence-Chasen"},{"affiliations":["The Ohio State University, Columbus, United States","National Renewable Energy Lab, Golden, United States"],"email":"duan.418@osu.edu","is_corresponding":false,"name":"Yuhan Duan"},{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"julie.bessac@nrel.gov","is_corresponding":false,"name":"Julie Bessac"},{"affiliations":["National Renewable Energy Laboratory, Golden, United States"],"email":"kristi.potter@nrel.gov","is_corresponding":false,"name":"Kristi Potter"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1017","image_caption":"The relationship between ensemble datasets and surrogates. Parameters (left) and outputs (right) in solid rectangles represent realizations from an ensemble dataset. A forward surrogate (top) enables a user to propose novel parameter settings and predict output variables, along with quantified uncertainty relating to how close those predictions get to the original ensemble outputs. A reverse surrogate (bottom) allows the user to choose output values and determine possible input parameters that will get within a range of that proposed output. 
We assess the role of uncertainty visualization in facilitating intuitive and actionable interaction with ensemble data and surrogate models, and highlight key challenges in this new frontier of computational simulation.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"Uncertainty Visualization Challenges in Decision Systems with Ensemble Data & Surrogate Models","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1009","abstract":"Understanding and communicating data uncertainty is crucial for informed decision-making across various domains, including finance, healthcare, and public policy. This study investigates the impact of gender and acoustic variables on decision-making, confidence, and trust through a crowdsourced experiment. We compared visualization-only representations of uncertainty to text-forward and speech-forward bimodal representations, including multiple synthetic voices across gender. Speech-forward representations led to an increase in risky decisions, and text-forward representations led to lower confidence. Contrary to prior work, speech-forward forecasts did not receive higher ratings of trust. Higher normalized pitch led to a slight increase in decision confidence, but other voice characteristics had minimal impact on decisions and trust. An exploratory analysis of accented speech showed consistent results with the main experiment and additionally indicated lower trust ratings for information presented in Indian and Kenyan accents. The results underscore the importance of considering acoustic and contextual factors in presentation of data uncertainty.","accessible_pdf":false,"authors":[{"affiliations":["University of California Berkeley, Berkeley, United States"],"email":"chase_stokes@berkeley.edu","is_corresponding":true,"name":"Chase Stokes"},{"affiliations":["Stanford University, Stanford, United States"],"email":"sanker@stanford.edu","is_corresponding":false,"name":"Chelsea Sanker"},{"affiliations":["Versalytix, Columbus, United States"],"email":"bcogley@versalytix.com","is_corresponding":false,"name":"Bridget Cogley"},{"affiliations":["Tableau Research, Palo Alto, United States"],"email":"vsetlur@tableau.com","is_corresponding":false,"name":"Vidya Setlur"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1009","image_caption":"Example stimuli viewed by participants. (a) Visualization-only representation: a density plot showing the distribution of possible nighttime temperatures. 
(c) Speech-forward representation: contains the same density mark to provide some visual information, accompanied by an mp3 player which describes the distribution, temperature values, and likelihoods. We tested six different variants of these representations, with three masculine voices and three feminine voices. (c) Text-forward representation: contains the density mark and a text paragraph describing the distribution and likelihoods for different values. This is the same content as present in the speech forecast.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2408.08438","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-uncertainty/w-uncertainty-1009/w-uncertainty-1009_Preview.mp4?token=8Ya_rIZn7flqOlmKHILwBCvrxKdN1CX91FEvXi5cslk&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-uncertainty/w-uncertainty-1009/w-uncertainty-1009_Preview.srt?token=kKlx2RhluAJIjppd-_OxtbQUL-d_Ns5VrClhkPVXEA4&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"pWsB9XzF8uA","session_youtube_ff_link":"https://youtu.be/pWsB9XzF8uA","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"Voicing Uncertainty: How Speech, Text, and Visualizations Influence Decisions with Data Uncertainty","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1018","abstract":"Although people frequently make decisions based on uncertain forecasts about future events, there is little guidance about how best to represent the uncertainty in forecasts. One common approach is to use multiple forecast visualizations, in which multiple forecasts are plotted on the same graph. This provides an implicit representation of the uncertainty in the data, but it is not clear how many forecasts to show, or how viewers might be influenced by seeing the more extreme forecasts rather than those closer to the mean. In this study, we showed participants forecasts of wind speed data and they made decisions based on their predictions about the future wind speed. We allowed participants to choose how many forecasts to view prior to making a decision, and we manipulated the ordering of the forecasts and the cost of each additional forecast. We found that participants viewed more forecasts when the outcome was more ambiguous. The order of the forecasts had little impact on their decisions when there was no cost for the additional information. However, when there was a cost for each forecast, the participants were much more likely to make a guess based on only the first forecast shown. 
In this case, showing one of the extreme forecasts first led to less optimal decisions.","accessible_pdf":true,"authors":[{"affiliations":["Sandia National Laboratories, Albuquerque, United States"],"email":"lematze@sandia.gov","is_corresponding":true,"name":"Laura Matzen"},{"affiliations":["Sandia National Laboratories, Albuquerque, United States"],"email":"mcstite@sandia.gov","is_corresponding":false,"name":"Mallory C Stites"},{"affiliations":["Sandia National Laboratories, Albuquerque, United States"],"email":"kmdivis@sandia.gov","is_corresponding":false,"name":"Kristin M Divis"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"abendeck3@gatech.edu","is_corresponding":false,"name":"Alexander Bendeck"},{"affiliations":["Georgia Institute of Technology, Atlanta, United States"],"email":"john.stasko@cc.gatech.edu","is_corresponding":false,"name":"John Stasko"},{"affiliations":["Northeastern University, Boston, United States"],"email":"l.padilla@northeastern.edu","is_corresponding":false,"name":"Lace M. Padilla"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1018","image_caption":"In this experiment, participants made decisions based on wind speed forecasts shown in multiple forecast visualizations. They saw one forecast to start, but could add up to 19 more forecasts to the plot, one at a time, prior to making their decisions. We manipulated the risk of the situation (the percentage of forecasts crossing the critical threshold of 50 miles per hour), the order in which the first three forecasts in the set appeared, and the cost of obtaining additional forecasts. This figure shows examples of the stimuli, each displaying three forecasts, at different levels of the Percent Crossing manipulation.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://osf.io/preprints/osf/vhs7w/","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"Effects of Forecast Number, Order, and Cost in Multiple Forecast Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1015","abstract":"Functional depth is a well-known technique used to derive descriptive statistics (e.g., median, quartiles, and outliers) for 1D data. Surface boxplots extend this concept to ensembles of images, helping scientists and users identify representative and outlier images. 
However, the computational time for surface boxplots increases cubically with the number of ensemble members, making it impractical for integration into visualization tools. In this paper, we propose a deep-learning solution for efficient depth prediction and computation of surface boxplots for time-varying ensemble data. Our deep learning framework accurately predicts member depths in a surface boxplot, achieving average speedups of 6X on a CPU and 15X on a GPU for the 2D Red Sea dataset with 50 ensemble members compared to the traditional depth computation algorithm. Our approach achieves at least a 99% level of rank preservation, with order flipping occurring only at pairs with extremely similar depth values that pose no statistical differences. This local flipping does not significantly impact the overall depth order of the ensemble members.","accessible_pdf":true,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"mengjiao@sci.utah.edu","is_corresponding":true,"name":"Mengjiao Han"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"tushar.athawale@gmail.com","is_corresponding":false,"name":"Tushar M. Athawale"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"jixianli@sci.utah.edu","is_corresponding":false,"name":"Jixian Li"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"crj@sci.utah.edu","is_corresponding":false,"name":"Chris R. Johnson"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1015","image_caption":"Functional depth is a valuable technique for analyzing uncertainty of 1D data, and surface boxplots extend this concept to image ensembles, aiding in identifying representative and outlier images. However, the high computational cost limits their usability. This paper introduces a deep-learning framework for efficient surface boxplot computation in time-varying ensemble data. 
Our method accelerates depth prediction, achieving up to 15X speedups on a GPU while maintaining 99% rank preservation accuracy, making it a practical solution for integrating surface boxplots into visualization tools.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"Accelerated Depth Computation for Surface Boxplots with Deep Learning","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1012","abstract":"Uncertainty visualization is an emerging research topic in data visualization because neglecting uncertainty in visualization can lead to inaccurate assessments. In this short paper, we study the propagation of multivariate data uncertainty in visualization. Although there have been a few advancements in probabilistic uncertainty visualization of multivariate data, three critical challenges remain to be addressed. First, the state-of-the-art probabilistic uncertainty visualization framework is limited to bivariate data (two variables). Second, the existing uncertainty visualization algorithms use computationally intensive techniques and lack support for cross-platform portability. Third, as a consequence of the computational expense, integration into interactive production visualization tools is impractical. In this work, we address all three issues and make a threefold contribution. First, we generalize the state-of-the-art probabilistic framework for bivariate data to multivariate data with an arbitrary number of variables. Second, through utilization of VTK-m\u2019s shared-memory parallelism and cross-platform compatibility features, we demonstrate acceleration of multivariate uncertainty visualization on different many-core architectures, including OpenMP and AMD GPUs. Third, we demonstrate the integration of our algorithms with the ParaView software. 
We demonstrate utility of our algorithms through experiments on multivariate simulation data.","accessible_pdf":false,"authors":[{"affiliations":["Indiana University Bloomington, Bloomington, United States"],"email":"gautamhari@outlook.com","is_corresponding":true,"name":"Gautam Hari"},{"affiliations":["Indiana University Bloomington, Bloomington, United States"],"email":"nrushad2001@gmail.com","is_corresponding":false,"name":"Nrushad A Joshi"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"jay.wang@rutgers.edu","is_corresponding":false,"name":"Zhe Wang"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"gongq@ornl.gov","is_corresponding":false,"name":"Qian Gong"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"pugmire@ornl.gov","is_corresponding":false,"name":"David Pugmire"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"kmorel@acm.org","is_corresponding":false,"name":"Kenneth Moreland"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"crj@sci.utah.edu","is_corresponding":false,"name":"Chris R. Johnson"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"klasky@ornl.gov","is_corresponding":false,"name":"Scott Klasky"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"pnorbert@ornl.gov","is_corresponding":false,"name":"Norbert Podhorszki"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"tushar.athawale@gmail.com","is_corresponding":false,"name":"Tushar M. Athawale"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1012","image_caption":"A simulation of the Deep Water Impact. From Left to Right, the images are a) Original Dataset, b) Compressed data without uncertainty, and c) Compressed data with uncertainty. The colors of the Uncertainty image range from transparent deep purple regions that indicate positions of lower probability, whereas the less transparent bright yellow regions indicate positions of higher probability. Uncertainty visualization recovers key topological structures, such as the rib-like formations (e.g., rib-like structure in the inset views), which appear broken in traditional mean-field visualization. 
This probabilistic approach of uncertainty visualization allows for the recovery of potentially important features in uncertain data.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-uncertainty/w-uncertainty-1012/w-uncertainty-1012_Preview.mp4?token=fRsU_limNTdX9nAIOhU27Jqv9DG9Bf1P4Bu9Hk_CLCg&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-uncertainty/w-uncertainty-1012/w-uncertainty-1012_Preview.srt?token=CRT8diSvS6Vaelc4Nun96O8lftyDKhnf9SjCVXAyuog&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"QH7dbVdSO3I","session_youtube_ff_link":"https://youtu.be/QH7dbVdSO3I","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"FunM^2C: A Filter for Uncertainty Visualization of Multivariate Data on Multi-Core Devices","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1011","abstract":"Current research provides methods to communicate uncertainty and adapts classical algorithms of the visualization pipeline to take the uncertainty into account. Various existing visualization frameworks include methods to present uncertain data but do not offer transformation techniques tailored to uncertain data. Therefore, we propose a software package for uncertainty-aware data analysis in Python (UADAPy) offering methods for uncertain data along the visualization pipeline.We aim to provide a platform that is the foundation for further integration of uncertainty algorithms and visualizations. It provides common utility functionality to support research in uncertainty-aware visualization algorithms and makes state-of-the-art research results accessible to the end user. 
The project is available at https://github.com/UniStuttgart-VISUS/uadapy.","accessible_pdf":false,"authors":[{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"patrick.paetzold@uni-konstanz.de","is_corresponding":true,"name":"Patrick Paetzold"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"david.haegele@visus.uni-stuttgart.de","is_corresponding":false,"name":"David H\u00e4gele"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"m_ever14@uni-muenster.de","is_corresponding":false,"name":"Marina Evers"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"weiskopf@visus.uni-stuttgart.de","is_corresponding":false,"name":"Daniel Weiskopf"},{"affiliations":["University of Konstanz, Konstanz, Germany"],"email":"oliver.deussen@uni-konstanz.de","is_corresponding":false,"name":"Oliver Deussen"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1011","image_caption":"The UADAPy software package is a toolbox providing high-dimensional uncertain sample data sets, uncertainty-aware data transformations and analysis methods, and visualization methods tailored to show uni- and multivariate sets of probability distributions.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"UADAPy: An Uncertainty-Aware Visualization and Analysis Toolbox","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1014","abstract":"Isosurface visualization is fundamental for exploring and analyzing 3D volumetric data. Marching cubes (MC) algorithms with linear interpolation are commonly used for isosurface extraction and visualization.Although linear interpolation is easy to implement, it has limitations when the underlying data is complex and high-order, which is the case for most real-world data. Linear interpolation can output vertices at the wrong location. Its inability to deal with sharp features and features smaller than grid cells can create holes and broken pieces in the extracted isosurface. Despite these limitations, isosurface visualizations typically do not include insight into the spatial location and the magnitude of these errors. We utilize high-order interpolation methods with MC algorithms and interactive visualization to highlight these uncertainties. Our visualization tool helps identify the regions of high interpolation errors. It also allows users to query local areas for details and compare the differences between isosurfaces from different interpolation methods. 
In addition, we employ high-order methods to identify and reconstruct possible features that linear methods cannot detect.We showcase how our visualization tool helps explore and understand the extracted isosurface errors through synthetic and real-world data.","accessible_pdf":true,"authors":[{"affiliations":["Scientific Computing and Imaging Institute, Salk Lake City, United States"],"email":"touermi@sci.utah.edu","is_corresponding":true,"name":"Timbwaoga A. J. Ouermi"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"jixianli@sci.utah.edu","is_corresponding":false,"name":"Jixian Li"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"tushar.athawale@gmail.com","is_corresponding":false,"name":"Tushar M. Athawale"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"crj@sci.utah.edu","is_corresponding":false,"name":"Chris R. Johnson"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1014","image_caption":"Our proposed visualization system highlights errors introduced by linear interpolation methods and allows users to query local vertex differences between interpolation methods. The first column shows the approximated isosurface uncertainty and local selection using the colormap and transparent box, respectively. The second column shows the differences between linear and cubic, linear and WENO, and the approximated error for each vertex inside the transparent boxes. The third column shows a global comparison between linear and WENO. The fourth and fifth columns show a comparison between isosurfaces with (transparent orange) and without (opaque blue) possible hidden features that indicate isosurface feature uncertainty.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2409.00043","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"Estimation and Visualization of Isosurface Uncertainty from Linear and High-Order Interpolation Methods","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1010","abstract":"The increasing adoption of Deep Neural Networks (DNNs) has led to their application in many challenging scientific visualization tasks. While advanced DNNs offer impressive generalization capabilities, understanding factors such as model prediction quality, robustness, and uncertainty is crucial. These insights can enable domain scientists to make informed decisions about their data. 
However, DNNs inherently lack ability to estimate prediction uncertainty, necessitating new research to construct robust uncertainty-aware visualization techniques tailored for various visualization tasks. In this work, we propose uncertainty-aware implicit neural representations to model scalar field data sets effectively and comprehensively study the efficacy and benefits of estimated uncertainty information for volume visualization tasks. We evaluate the effectiveness of two principled deep uncertainty estimation techniques: (1) Deep Ensemble and (2) Monte Carlo Dropout (MCDropout). These techniques enable uncertainty-informed volume visualization in scalar field data sets. Our extensive exploration across multiple data sets demonstrates that uncertainty-aware models produce informative volume visualization results. Moreover, integrating prediction uncertainty enhances the trustworthiness of our DNN model, making it suitable for robustly analyzing and visualizing real-world scientific volumetric data sets.","accessible_pdf":false,"authors":[{"affiliations":["IIT kanpur , Kanpur , India"],"email":"saklanishanu@gmail.com","is_corresponding":false,"name":"Shanu Saklani"},{"affiliations":["Indian Institute of Technology Kanpur, Kanpur, India"],"email":"chitwangoel1010@gmail.com","is_corresponding":false,"name":"Chitwan Goel"},{"affiliations":["Indian Institute of Technology Kanpur, Kanpur, India"],"email":"shrey.bansal75@gmail.com","is_corresponding":false,"name":"Shrey Bansal"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"jay.wang@rutgers.edu","is_corresponding":false,"name":"Zhe Wang"},{"affiliations":["Indian Institute of Technology Kanpur (IIT Kanpur), Kanpur, India"],"email":"soumya.cvpr@gmail.com","is_corresponding":false,"name":"Soumya Dutta"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"tushar.athawale@gmail.com","is_corresponding":false,"name":"Tushar M. Athawale"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"pugmire@ornl.gov","is_corresponding":false,"name":"David Pugmire"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"crj@sci.utah.edu","is_corresponding":false,"name":"Chris R. Johnson"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1010","image_caption":"Showcasing how uncertainty-aware deep learning models produce informative and reliable volume rendering results. 
Furthermore, the results demonstrate how prediction uncertainty in volume rendering can be quantified and communicated to domain scientists, aiding in the interpretation of deep learning model-generated outcomes.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2408.06018","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"Uncertainty-Informed Volume Visualization using Implicit Neural Representation","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1013","abstract":"Uncertainty is inherent to most data, including vector field data, yet it is often omitted in visualizations and representations. Effective uncertainty visualization can enhance the understanding and interpretability of vector field data. For instance, in the context of severe weather events such as hurricanes and wildfires, effective uncertainty visualization can provide crucial insights about fire spread or hurricane behavior and aid in resource management and risk mitigation. Glyphs are commonly used for representing vector uncertainty but are often limited to 2D. In this work, we present a glyph-based technique for accurately representing 3D vector uncertainty and a comprehensive framework for visualization, exploration, and analysis using our new glyphs. We employ hurricane and wildfire examples to demonstrate the efficacy of our glyph design and visualization tool in conveying vector field uncertainty.","accessible_pdf":true,"authors":[{"affiliations":["Scientific Computing and Imaging Institute, Salk Lake City, United States"],"email":"touermi@sci.utah.edu","is_corresponding":true,"name":"Timbwaoga A. J. Ouermi"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"jixianli@sci.utah.edu","is_corresponding":false,"name":"Jixian Li"},{"affiliations":["Sandia National Laboratories, Albuquerque, United States"],"email":"zbmorro@sandia.gov","is_corresponding":false,"name":"Zachary Morrow"},{"affiliations":["Sandia National Laboratories, Albuquerque, United States"],"email":"bartv@sandia.gov","is_corresponding":false,"name":"Bart van Bloemen Waanders"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"crj@sci.utah.edu","is_corresponding":false,"name":"Chris R. Johnson"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1013","image_caption":"3D vector uncertainty glyph. The glyphs' direction corresponds to the median vector direction. The cone glyph encodes angle variation and maximum vector length but omits magnitude variation. 
The comet glyph includes the magnitude variation and minimum magnitude. However, these variations are not easily discernible. While both the tailed-disc and squid distinguish these uncertainties, the small arrow size and rotational symmetry of the tailed-disc limit the perception. Our proposed squid glyph effectively distinguishes between magnitude and direction variations. Additionally, it employs superellipses (2D superquadrics) to better approximate directional variations, eliminate rotational ambiguity, and improve overall accuracy.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"http://arxiv.org/abs/2409.00042","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"Glyph-Based Uncertainty Visualization and Analysis of Time-Varying Vector Field","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1019","abstract":"We present a simple comparative framework for testing and developing uncertainty modeling in uncertain marching cubes implementations. The selection of a model to represent the probability distribution of uncertain values directly influences the memory use, run time, and accuracy of an uncertainty visualization algorithm. We use an entropy calculation directly on ensemble data to establish an expected result and then compare the entropy from various probability models, including uniform, Gaussian, histogram, and quantile models. Our results verify that models matching the distribution of the ensemble indeed match the entropy. We further show that fewer bins in nonparametric histogram models are more effective whereas large numbers of bins in quantile models approach data accuracy.","accessible_pdf":true,"authors":[{"affiliations":["University of Illinois Urbana-Champaign, Urbana, United States"],"email":"sisneros@illinois.edu","is_corresponding":true,"name":"Robert Sisneros"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"tushar.athawale@gmail.com","is_corresponding":false,"name":"Tushar M. Athawale"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"kmorel@acm.org","is_corresponding":false,"name":"Kenneth Moreland"},{"affiliations":["Oak Ridge National Laboratory, Oak Ridge, United States"],"email":"pugmire@ornl.gov","is_corresponding":false,"name":"David Pugmire"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1019","image_caption":"Representative test/result from our framework (wind dataset ensemble created via random uniform noise). 
The entropy for the full distribution model closely matches the uniform distribution assumption (red boxes), and the minimum entropy with the Gaussian assumption may not always be the best representative.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"An Entropy-Based Test and Development Framework for Uncertainty Modeling in Level-Set Visualizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-uncertainty-1016","abstract":"Wildfire poses substantial risks to our health, environment, and economy. Studying wildfire is challenging due to its complex interaction with the atmosphere dynamics and the terrain. Researchers have employed ensemble simulations to study the relationship between variables and mitigate uncertainties in unpredictable initial conditions. However, many domain scientists are unaware of the advanced visualization tools available for conveying uncertainty. To bring uncertainty visualization techniques to these domain scientists, we build an interactive visualization system that utilizes a band-depth-based method providing a statistical summary and visualization for fire front contours from the ensemble. We augment the visualization system with capabilities to study wildfires as a dynamic system. In this paper, we demonstrate how our system can support domain scientists in studying fire spread patterns, identifying outlier simulations, and navigating to interesting instances based on a summary of events.","accessible_pdf":false,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"jixianli@sci.utah.edu","is_corresponding":true,"name":"Jixian Li"},{"affiliations":["Scientific Computing and Imaging Institute, Salk Lake City, United States"],"email":"touermi@sci.utah.edu","is_corresponding":false,"name":"Timbwaoga A. J. Ouermi"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"crj@sci.utah.edu","is_corresponding":false,"name":"Chris R. Johnson"}],"award":"","doi":"","event_id":"w-uncertainty","event_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-uncertainty-1016","image_caption":"We introduce our interactive interface for visualizing uncertainties of ensemble wildfire simulations. Our interface uses the contour boxplot to summarize the trend and variations of fire spreading patterns. 
Our interface also supports transfer-function-based color and opacity mapping for visualizing scalar functions from wildfire simulations, glyph- and streamline-based wind visualization, temporal events summary, contour band depths, spatial query for the fire arrival time (red sphere in the terrain shows the query point)","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop5","session_room":"Bayshore VI","session_room_id":"bayshore6","session_title":"Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks","session_uid":"w-uncertainty","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Uncertainty Visualization: Applications, Techniques, Software, and Decision Frameworks"],"time_stamp":"2024-10-14T12:30:00Z","title":"Visualizing Uncertainties in Ensemble Wildfire Forecast Simulations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-storygenai-5237","abstract":"Communicating data insights in an accessible and engaging manner to a broader audience remains a significant challenge. To address this problem, we introduce the Emoji Encoder, a tool that generates a set of emoji recommendations for the field and category names appearing in a tabular dataset. The selected set of emoji encodings can be used to generate configurable unit charts that combine plain text and emojis as word-scale graphics. These charts can serve to contrast values across multiple quantitative fields for each row in the data or to communicate trends over time. Any resulting chart is simply a block of text characters, meaning that it can be directly copied into a text message or posted on a communication platform such as Slack or Teams. This work represents a step toward our larger goal of developing novel, fun, and succinct data storytelling experiences that engage those who do not identify as data analysts. Emoji-based unit charts can offer contextual cues related to the data at the center of a conversation on platforms where emoji-rich communication is typical.","accessible_pdf":false,"authors":[{"affiliations":["University of Waterloo, Waterloo, Canada","Tableau Research, Seattle, United States"],"email":"mbrehmer@uwaterloo.ca","is_corresponding":true,"name":"Matthew Brehmer"},{"affiliations":["Tableau Research, Palo Alto, United States"],"email":"vsetlur@tableau.com","is_corresponding":false,"name":"Vidya Setlur"},{"affiliations":["McGraw Hill, Seattle, United States","Tableau Software, Seattle, United States"],"email":"zoezoezoe.cc@gmail.com","is_corresponding":false,"name":"Zoe Zoe"},{"affiliations":["Northeastern University, Portland, United States"],"email":"m.correll@northeastern.edu","is_corresponding":false,"name":"Michael Correll"}],"award":"","doi":"","event_id":"w-storygenai","event_title":"Workshop on Data Storytelling in an Era of Generative AI","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-storygenai-5237","image_caption":"The EMOJI ENCODER is an interactive chart authoring interface for Tableau that generates emoji representations based on field names and the values of categorical fields. 
In this example, an emoji pictograph depicts flood risk values across the Netherlands along with the number and type of employees in each province, shown in a Slack Message, or, because emojis are simply Unicode Characters, in this caption:\ud83c\udfd9\ufe0f \ud83d\udd35 Drenthe \ud83c\udfd9\ufe0f \ud83d\udd35 Flevoland \ud83c\udfd9\ufe0f \ud83d\udd35 Friesland \ud83c\udfd9\ufe0f \ud83d\udd34 Gelderland \ud83c\udfd9\ufe0f \ud83d\udd35 Groningen \ud83c\udfd9\ufe0f \ud83d\udd35 Limburg \ud83c\udfd9\ufe0f \u26aa\ufe0f \ud83d\udc68\u200d\ud83d\udcbc \ud83d\udc68\u200d\ud83d\udcbc \ud83d\udc68\u200d\ud83d\udcbc North Brabant \ud83c\udfd9\ufe0f \ud83d\udd35 \ud83c\udfe2 \ud83c\udfe2 \ud83c\udfe2 \ud83c\udfe2 \ud83c\udfe2 North Holland \ud83c\udfd9\ufe0f \ud83d\udd35 Overijssel \ud83c\udfd9\ufe0f \ud83d\udd34 \ud83d\udc68\u200d\ud83d\udcbc \ud83d\udc68\u200d\ud83d\udcbc \ud83d\udc68\u200d\ud83d\udcbc \ud83d\udc68\u200d\ud83d\udcbc \ud83d\udc68\u200d\ud83d\udcbc \ud83d\udc68\u200d\ud83d\udcbc \ud83d\udc68\u200d\ud83d\udcbc \ud83d\udc68\u200d\ud83d\udcbc South Holland \ud83c\udfd9\ufe0f \u26aa\ufe0f \ud83d\udc68\u200d\ud83d\udcbc \ud83d\udc68\u200d\ud83d\udcbc Utrecht \ud83c\udfd9\ufe0f \ud83d\udd35 Zeeland ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://www.arxiv.org/abs/2408.13418","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop6","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Workshop on Data Storytelling in an Era of Generative AI","session_uid":"w-storygenai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Workshop on Data Storytelling in an Era of Generative AI"],"time_stamp":"2024-10-13T16:00:00Z","title":"The Data-Wink Ratio: Emoji Encoder for Generating Semantically-Resonant Unit Charts","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-storygenai-6168","abstract":"Data-driven storytelling serves as a crucial bridge for communicating ideas in a persuasive way. However, the manual creation of data stories is a multifaceted, labor-intensive, and case-specific effort, limiting their broader application. As a result, automating the creation of data stories has emerged as a significant research thrust. Despite advances in Artificial Intelligence, the systematic generation of data stories remains challenging due to their hybrid nature: they must frame a perspective based on a seed idea in a top-down manner, similar to traditional storytelling, while coherently grounding insights of given evidence in a bottom-up fashion, akin to data analysis. These dual requirements necessitate precise constraints on the permissible space of a data story. In this viewpoint, we propose integrating constraints into the data story generation process. Defined upon the hierarchies of interpretation and articulation, constraints shape both narrations and illustrations to align with seed ideas and contextualized evidence. We identify the taxonomy and required functionalities of these constraints. Although constraints can be heterogeneous and latent, we explore the potential to represent them in a computation-friendly fashion via Domain-Specific Languages. 
We believe that leveraging constraints will balance the artistic and engineering aspects of data story generation.","accessible_pdf":true,"authors":[{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"yu.zhe.s.shi@gmail.com","is_corresponding":false,"name":"Yu-Zhe Shi"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"haotian.li@connect.ust.hk","is_corresponding":true,"name":"Haotian Li"},{"affiliations":["Peking University, Beijing, China"],"email":"ruanlecheng@whai.pku.edu.cn","is_corresponding":false,"name":"Lecheng Ruan"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"huamin@cse.ust.hk","is_corresponding":false,"name":"Huamin Qu"}],"award":"","doi":"","event_id":"w-storygenai","event_title":"Workshop on Data Storytelling in an Era of Generative AI","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-storygenai-6168","image_caption":"The architecture of data-driven storytelling with hierarchical constraints. We present intuitive illustrations of the representations with blocks (see Sec. 3.3). The colors highlighting textual narratives and visual illustrations are encoded according to their respective constraints.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop6","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Workshop on Data Storytelling in an Era of Generative AI","session_uid":"w-storygenai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Workshop on Data Storytelling in an Era of Generative AI"],"time_stamp":"2024-10-13T16:00:00Z","title":"Constraint representation towards precise data-driven storytelling","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-storygenai-7043","abstract":"Creating data stories from raw data is challenging due to humans\u2019 limited attention spans and the need for specialized skills. Recent advancements in large language models (LLMs) offer great opportunities to develop systems with autonomous agents to streamline the data storytelling workflow. Though multi-agent systems have benefits such as fully realizing LLM potentials with decomposed tasks for individual agents, designing such systems also faces challenges in task decomposition, performance optimization for sub-tasks, and workflow design. To better understand these issues, we develop Data Director, an LLM-based multi-agent system designed to automate the creation of animated data videos, a representative genre of data stories. Data Director interprets raw data, breaks down tasks, designs agent roles to make informed decisions automatically, and seamlessly integrates diverse components of data videos. A case study demonstrates Data Director\u2019s effectiveness in generating data videos. Throughout development, we have derived lessons learned from addressing challenges, guiding further advancements in autonomous agents for data storytelling. 
We also shed light on future directions for global optimization, human-in-the-loop design, and the application of advanced multi-modal LLMs.","accessible_pdf":false,"authors":[{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"lshenaj@connect.ust.hk","is_corresponding":true,"name":"Leixian Shen"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"haotian.li@connect.ust.hk","is_corresponding":false,"name":"Haotian Li"},{"affiliations":["Microsoft, Beijing, China"],"email":"yunvvang@gmail.com","is_corresponding":false,"name":"Yun Wang"},{"affiliations":["The Hong Kong University of Science and Technology, Hong Kong, China"],"email":"huamin@cse.ust.hk","is_corresponding":false,"name":"Huamin Qu"}],"award":"","doi":"","event_id":"w-storygenai","event_title":"Workshop on Data Storytelling in an Era of Generative AI","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-storygenai-7043","image_caption":"Architecture of Data Director, which is an LLM-based multi-agent system for automatic animated data video creation.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop6","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Workshop on Data Storytelling in an Era of Generative AI","session_uid":"w-storygenai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Workshop on Data Storytelling in an Era of Generative AI"],"time_stamp":"2024-10-13T16:00:00Z","title":"From Data to Story: Towards Automatic Animated Data Video Creation with LLM-based Multi-Agent Systems","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-storygenai-7072","abstract":"Crafting accurate and insightful narratives from data visualization is essential in data storytelling. Like creative writing, where one reads to write a story, data professionals must effectively ``read\" visualizations to create compelling data stories. In education, helping students develop these skills can be achieved through exercises that ask them to create narratives from data plots, demonstrating both ``show\" (describing the plot) and ``tell\" (interpreting the plot). Providing formative feedback on these exercises is crucial but challenging in large-scale educational settings with limited resources. This study explores using GPT-4o, a multimodal LLM, to generate and evaluate narratives from data plots. The LLM was tested in zero-shot, one-shot, and two-shot scenarios, generating narratives and self-evaluating their depth. Human experts also assessed the LLM's outputs. Additionally, the study developed machine learning and LLM-based models to assess student-generated narratives using LLM-generated data. Human experts validated a subset of these machine assessments. 
The findings highlight the potential of LLMs to support scalable formative assessment in teaching data storytelling skills, which has important implications for AI-supported educational interventions.","accessible_pdf":false,"authors":[{"affiliations":["University of Maryland Baltimore County, Baltimore, United States"],"email":"narens1@umbc.edu","is_corresponding":true,"name":"Naren Sivakumar"},{"affiliations":["University of Maryland, Baltimore County, Baltimore, United States"],"email":"lujiec@umbc.edu","is_corresponding":false,"name":"Lujie Karen Chen"},{"affiliations":["University of Maryland,Baltimore County, Baltimore, United States"],"email":"io11937@umbc.edu","is_corresponding":false,"name":"Pravalika Papasani"},{"affiliations":["University of maryland baltimore county, Hanover, United States"],"email":"vignam1@umbc.edu","is_corresponding":false,"name":"Vigna Majmundar"},{"affiliations":["Towson University, Towson, United States"],"email":"jfeng@towson.edu","is_corresponding":false,"name":"Jinjuan Heidi Feng"},{"affiliations":["SRI International, Menlo Park, United States"],"email":"louise.yarnall@sri.com","is_corresponding":false,"name":"Louise Yarnall"},{"affiliations":["University of Alabama, Tuscaloosa, United States"],"email":"jgong@umbc.edu","is_corresponding":false,"name":"Jiaqi Gong"}],"award":"","doi":"","event_id":"w-storygenai","event_title":"Workshop on Data Storytelling in an Era of Generative AI","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-storygenai-7072","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop6","session_room":"Bayshore VII","session_room_id":"bayshore7","session_title":"Workshop on Data Storytelling in an Era of Generative AI","session_uid":"w-storygenai","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Workshop on Data Storytelling in an Era of Generative AI"],"time_stamp":"2024-10-13T16:00:00Z","title":"Show and Tell: Exploring Large Language Model\u2019s Potential in Formative Educational Assessment of Data Stories","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-accessible-1009","abstract":"The introduction of novel visualizations through animated transitions is a well-established practice in visualization research. In our preliminary exploratory study, we investigate whether this approach could effectively facilitate the introduction of new visualization types to blind and low-vision (BLV) individuals. Specifically, we present two approaches, direct and gradual, to a user who is blind and compare their potential usefulness. The direct approach involved a single, comprehensive description of the visual elements, while the gradual approach utilized a series of visualizations and transitions, starting from familiar visualization types known to the user and progressing to the final, novel visualization. We introduce two genomics visualizations, sequence logos and Circos plots, to the user with descriptions and then ask them to sketch the visualizations to reflect their understanding of the visual elements. 
Feedback from the user indicates that the gradual approach was easier to follow, suggesting that BLV individuals could benefit more from this method. We outline our design process and insights from the study, and highlight key considerations for future research directions.","accessible_pdf":true,"authors":[{"affiliations":["Harvard Medical School, Boston, United States"],"email":"tsmits@hms.harvard.edu","is_corresponding":true,"name":"Thomas C. Smits"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"sehi_lyi@hms.harvard.edu","is_corresponding":false,"name":"Sehi L'Yi"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"huyen_nguyen@hms.harvard.edu","is_corresponding":false,"name":"Huyen N. Nguyen"},{"affiliations":["University of California, Berkeley, United States","Harvard Medical School, Boston, United States"],"email":"apmar@berkeley.edu","is_corresponding":false,"name":"Andrew P Mar"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"nils@hms.harvard.edu","is_corresponding":false,"name":"Nils Gehlenborg"}],"award":"","doi":"","event_id":"w-accessible","event_title":"1st Workshop on Accessible Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-accessible-1009","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://osf.io/preprints/osf/v7mxz","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-accessible/w-accessible-1009/w-accessible-1009_Preview.mp4?token=zl41wAq6_2TYIJKHqSkWTfw9TnIr_UEbl7kig20VTVQ&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"1st Workshop on Accessible Data Visualization","session_uid":"w-accessible","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["1st Workshop on Accessible Data Visualization"],"time_stamp":"2024-10-13T12:30:00Z","title":"Explaining Unfamiliar Genomics Data Visualizations to a Blind Individual through Transitions","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-accessible-1011","abstract":"Content on the internet is often not accessible to all users. In particular with data visualizations, blind and visually impaired people face the problem that the presented data is either impossible or very difficult to access with the help of a screen reader. The aim of this paper is to develop a concept that enables screen reader users to explore online data visualizations. The concept should enable users to gain a comprehensive overview of the data and search for specific data items. In addition, sonification is integrated to help users understand the data. 
A user study with five non-sighted participants provides insight into how data visualizations can be explored with the help of the prototype.","accessible_pdf":true,"authors":[{"affiliations":["School of Informatics, Communications and Media, Hagenberg im M\u00fchlkreis, Austria"],"email":"s2110745013@students.fh-hagenberg.at","is_corresponding":false,"name":"Julia Loitzenbauer MSc"},{"affiliations":["University of Applied Sciences Upper Austria, Hagenberg im M\u00fchlkreis, Austria"],"email":"mandy.keck@fh-hagenberg.at","is_corresponding":true,"name":"Mandy Keck"}],"award":"","doi":"","event_id":"w-accessible","event_title":"1st Workshop on Accessible Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-accessible-1011","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-accessible/w-accessible-1011/w-accessible-1011_Preview.mp4?token=UDUelzgQWxbtdbilMXKHVfJGRx8XObrEjCMjZXXUB0U&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"1st Workshop on Accessible Data Visualization","session_uid":"w-accessible","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["1st Workshop on Accessible Data Visualization"],"time_stamp":"2024-10-13T12:30:00Z","title":"A Screen Reader and Sonification Approach for Non-sighted Users to Explore Data Visualizations on the Internet","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-accessible-1012","abstract":"Embedded information displays (EIDs) are becoming increasingly ubiquitous on home appliances and devices such as microwaves, coffee machines, fridges, or digital thermostats. These displays are often multi-purpose, functioning as interfaces for selecting device settings, communicating operating status using simple visualizations, and displaying notifications. However, their usability for people in the late adulthood (PLA) development stage is not well-understood. We report on two focus groups with PLA (n=11, ages 76-94) from a local retirement community. Participants were shown images of everyday home electronics and appliances, answering questions about their experiences using the EIDs. Using open coding, we qualitatively analyzed their comments to distill key themes regarding how EIDs can negatively affect PLA's ability to take in information (e.g., poor labels) and interact with these devices (e.g., unintuitive steps) alongside strategies employed to work around these issues. We argue that understanding the equitable design and communication of devices' functions, operating status, and messages is important for future information display designers. 
We hope this work stimulates further investigation into more equitable EID design.","accessible_pdf":true,"authors":[{"affiliations":["University of Massachusetts Amherst, Amherst, United States"],"email":"zwhile@cs.umass.edu","is_corresponding":true,"name":"Zack While"},{"affiliations":["University of Massachusetts Amherst, Amherst, United States"],"email":"hwheelerklai@umass.edu","is_corresponding":false,"name":"Henry Wheeler-Klainberg"},{"affiliations":["University of Stuttgart, Stuttgart, Germany"],"email":"research@blascheck.eu","is_corresponding":false,"name":"Tanja Blascheck"},{"affiliations":["Universit\u00e9 Paris-Saclay, CNRS, Orsay, France","Inria, Saclay, France"],"email":"petra.isenberg@inria.fr","is_corresponding":false,"name":"Petra Isenberg"},{"affiliations":["University of Massachusetts Amherst, Amherst, United States"],"email":"asarv@cs.umass.edu","is_corresponding":false,"name":"Ali Sarvghad"}],"award":"","doi":"","event_id":"w-accessible","event_title":"1st Workshop on Accessible Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-accessible-1012","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://arxiv.org/abs/2410.03929","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"1st Workshop on Accessible Data Visualization","session_uid":"w-accessible","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["1st Workshop on Accessible Data Visualization"],"time_stamp":"2024-10-13T12:30:00Z","title":"Toward Understanding the Experiences of People in Late Adulthood with Embedded Information Displays in the Home","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-accessible-1013","abstract":"Blind and visually impaired people are often excluded from the analysis of datasets because data visualizations primarily address the visual channel. For this reason, this paper examines different physical and tactile encodings for preparing datasets for non-sighted users. Using a user-centered design approach, the authors investigate how this target group perceive visualizations tactilely and to what extent different encodings are suitable for exploring different datasets. Furthermore, it will be investigated how tactile contextual components such as labels, legends, grids and guidelines must be designed so that the information can be interpreted as accurately as possible. 
A user study with five blind participants provided valuable insights for the design of tactile data physicalizations.","accessible_pdf":true,"authors":[{"affiliations":["School of Informatics, Communications and Media, Hagenberg im M\u00fchlkreis, Austria"],"email":"s2210631004@students.fh-hagenberg.at","is_corresponding":false,"name":"Julian Ebermann"},{"affiliations":["University of Applied Sciences Upper Austria, Hagenberg im M\u00fchlkreis, Austria"],"email":"mandy.keck@fh-hagenberg.at","is_corresponding":true,"name":"Mandy Keck"}],"award":"","doi":"","event_id":"w-accessible","event_title":"1st Workshop on Accessible Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-accessible-1013","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-accessible/w-accessible-1013/w-accessible-1013_Preview.mp4?token=6pWsd2682UVONlb44eOHQ1e-4KzPr1bQrNxBU9s5Dec&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"1st Workshop on Accessible Data Visualization","session_uid":"w-accessible","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["1st Workshop on Accessible Data Visualization"],"time_stamp":"2024-10-13T12:30:00Z","title":"From Sight to Touch: Designing Tactile Data Physicalizations for Non-sighted Users","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-accessible-1014","abstract":"AChart is a suite of open-source web-based tools written in TypeScript with Node.js to create and interpret semantically-enriched SVG-based accessible charts. AChart Creator is a command-line tool which generates accessible SVG charts from CSV files using the D3 framework, by injecting ARIA roles and properties from the AChart taxonomy. AChart Interpreter is a client-side web application and executable package which interprets such a semantically-enriched SVG chart and displays side-by-side graphical and textual versions of the chart. It can read out the chart using synthetic speech and its user interface is screen reader compatible. It can be used both by blind users to gain an understanding of a chart, as well as by developers and chart authors to verify and validate the accessibility markup of an SVG chart. AChart Summariser is a command-line tool which interprets an accessible SVG chart and outputs a textual summary of the chart. AChart currently supports bar charts, line charts, and pie charts.","accessible_pdf":true,"authors":[{"affiliations":["Graz University of Technology, Graz, Austria"],"email":"kandrews@iicm.edu","is_corresponding":true,"name":"Keith Andrews"},{"affiliations":["Graz University of Technology, Graz, Austria"],"email":"chr.kopel@gmail.com","is_corresponding":false,"name":"Christopher Alexander Kopel"}],"award":"","doi":"","event_id":"w-accessible","event_title":"1st Workshop on Accessible Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-accessible-1014","image_caption":"AChart Interpreter showing an accessible multi-line chart. 
The user has navigated to the third data point of Data Series 1.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"1st Workshop on Accessible Data Visualization","session_uid":"w-accessible","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["1st Workshop on Accessible Data Visualization"],"time_stamp":"2024-10-13T12:30:00Z","title":"Accessible SVG Charts with AChart","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-accessible-1015","abstract":"Data visualizations are typically not accessible to blind and low-vision users. The most widely used remedy for making data visualizations accessible is text descriptions. Yet, manually creating useful text descriptions is often omitted by visualization authors, either because of a lack of awareness or a perceived burden. Automatically generated text descriptions are a potential partial remedy. However, with current methods it is unfeasible to create text descriptions for complex scientific charts. In this paper, we describe our methods for generating text descriptions for one complex scientific visualization: the UpSet plot. UpSet is a widely used technique for the visualization and analysis of sets and their intersections. At the same time, UpSet is arguably unfamiliar to novices and used mostly in scientific contexts. Generating text descriptions for UpSet plots is challenging because the patterns observed in UpSet plots have not been studied. We first analyze patterns present in dozens of published UpSet plots. We then introduce software that generates text descriptions for UpSet plots based on the patterns present in the chart. 
Finally, we introduce a web service that generates text descriptions based on a specification of an UpSet plot, and demonstrate its use in both an interactive web-based implementation and a static Python implementation of UpSet.","accessible_pdf":false,"authors":[{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"ishratjahan.eliza@utah.edu","is_corresponding":true,"name":"Ishrat Jahan Eliza"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"jakew@sci.utah.edu","is_corresponding":false,"name":"Jake Wagoner"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"jwilburn@sci.utah.edu","is_corresponding":false,"name":"Jack Wilburn"},{"affiliations":["Scientific Computing and Imaging Institute, Salt Lake City, United States"],"email":"natelanzadevelopment@gmail.com","is_corresponding":false,"name":"Nate Lanza"},{"affiliations":["University College London, London, United Kingdom"],"email":"d.hajas@ucl.ac.uk","is_corresponding":false,"name":"Daniel Hajas"},{"affiliations":["University of Utah, Salt Lake City, United States"],"email":"alex@sci.utah.edu","is_corresponding":false,"name":"Alexander Lex"}],"award":"","doi":"","event_id":"w-accessible","event_title":"1st Workshop on Accessible Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-accessible-1015","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"1st Workshop on Accessible Data Visualization","session_uid":"w-accessible","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["1st Workshop on Accessible Data Visualization"],"time_stamp":"2024-10-13T12:30:00Z","title":"Accessible Text Descriptions for UpSet Plots","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-accessible-1024","abstract":"Many data visualization tools require a mouse. While such tools widen access to data communication and expression, their implementations are difficult or impossible to use by people with certain disabilities who experience difficulties using a mouse. What if people could use them as easily with a keyboard? OpenKeyNav is a zero-dependency JavaScript code library that exposes a developer-friendly API for initiating keyboard accessibility enhancements. We demonstrate a usage scenario of OpenKeyNav for improving the keyboard-accessibility of Voyager 2, an open-source web-based data visualization tool based on the shelf configuration similar to industry-leading Tableau. Since mouse-driven interactions such as drag-and-drop are found in software in a broad range of industries, the interaction methods we describe have potential implications for the education, employment, and autonomy of people with motor disabilities in various fields. A demonstration is at https://voyager-keyboard-demo.github.io/. 
Its instructions are at https://github.com/voyager-keyboard-demo/voyager-keyboard-demo.github.io/","accessible_pdf":false,"authors":[{"affiliations":["Harvard Medical School, Boston, United States"],"email":"lawrence_weru@hms.harvard.edu","is_corresponding":true,"name":"Lawrence Weru"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"sehi_lyi@hms.harvard.edu","is_corresponding":false,"name":"Sehi L'Yi"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"t.smits@hs-mannheim.de","is_corresponding":false,"name":"Thomas C. Smits"},{"affiliations":["Harvard Medical School, Boston, United States"],"email":"nils@hms.harvard.edu","is_corresponding":false,"name":"Nils Gehlenborg"}],"award":"","doi":"","event_id":"w-accessible","event_title":"1st Workshop on Accessible Data Visualization","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-accessible-1024","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-accessible/w-accessible-1024/w-accessible-1024_Preview.mp4?token=8lGsNR-yNTYFMwXgOKmP4pfhkvVnr109s4zTcvwsE_M&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-accessible/w-accessible-1024/w-accessible-1024_Preview.srt?token=CRAl72_NyqaVFwJ6c3wj9h3uLdf_MXP0nOQPALAGHDU&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop7","session_room":"Bayshore V","session_room_id":"bayshore5","session_title":"1st Workshop on Accessible Data Visualization","session_uid":"w-accessible","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["1st Workshop on Accessible Data Visualization"],"time_stamp":"2024-10-13T12:30:00Z","title":"Using OpenKeyNav to Enhance the Keyboard-Accessibility of Web-based Data Visualization Tools","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-future-1007","abstract":"Data physicalizations are a time-tested practice for visualizing data, but the sustainability challenges of current physicalization practices have only recently been explored; for example, the usage of carbon-intensive, non-renewable materials like plastic and metal. This work explores clay physicalizations as an approach to these challenges. Using a three-stage process, we investigate the design and sustainability of clay 3D printed physicalizations: 1) exploring the properties and constraints of clay when extruded through a 3D printer, 2) testing a variety of data encodings that work within the constraints, and 3) introducing Rain Gauge, a clay physicalization exploring climate effects on climate data with an impermanent material. Throughout our process, we investigate the material circularity of clay-based digital fabrication by reclaiming and reusing the clay stock in each stage. 
Finally, we reflect on the implications of ceramic 3D printing for data physicalization through the lenses of practicality and sustainability.","accessible_pdf":true,"authors":[{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"bridger.g.herman@gmail.com","is_corresponding":true,"name":"Bridger Herman"},{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"jlrossi@umn.edu","is_corresponding":false,"name":"Jessica Rossi-Mastracci"},{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"will1070@umn.edu","is_corresponding":false,"name":"Heather Willy"},{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"mreicher@umn.edu","is_corresponding":false,"name":"Molly Reichert"},{"affiliations":["University of Minnesota, Minneapolis, United States"],"email":"dfk@umn.edu","is_corresponding":false,"name":"Daniel F. Keefe"}],"award":"","doi":"","event_id":"w-future","event_title":"VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-future-1007","image_caption":"Rain Gauge is a clay data physicalization depicting monthly precipitation data from 1944-2024 on a cylindrical surface. Left panel: monthly precipitation in Minneapolis, MN, USA is encoded as line length outward from the surface. Middle panel: the printing process uses a 3D PotterBot 10 Pro ceramic 3D printer. Right panel: the Rain Gauge was set outside in the rain to explore environment-driven unmaking with the clay material. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://osf.io/preprints/osf/3nyrq","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop8","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation","session_uid":"w-future","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation"],"time_stamp":"2024-10-14T16:00:00Z","title":"Rain Gauge: Exploring the Design and Sustainability of 3D Printed Clay Physicalizations","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-future-1008","abstract":"We explain our model of data-in-a-void and contrast it with the idea of data-voids to explore how the different framings impact our thinking on sustainability. This contrast supports our assertion that how we think about the data that we work with for visualization design impacts the direction of our thinking and our work. To show this we describe how we view the concept of data-in-a-void as different from that of data-voids. Then we provide two examples, one that relates to existing data about bicycle mobility, and one about non-data for local food production. 
In the discussion, we then untangle and outline how our thinking about data for sustainability is impacted and influenced by the data-in-a-void model.","accessible_pdf":false,"authors":[{"affiliations":["University of Calgary, Calgary, Canada"],"email":"karly.ross@ucalgary.ca","is_corresponding":true,"name":"Karly Ross"},{"affiliations":["University of Calgary, Calgary, Canada"],"email":"pratim.sengupta@ucalgary.ca","is_corresponding":false,"name":"Pratim Sengupta"},{"affiliations":["University of Calgary, Calgary, Canada"],"email":"wj@wjwillett.net","is_corresponding":false,"name":"Wesley Willett"}],"award":"","doi":"","event_id":"w-future","event_title":"VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-future-1008","image_caption":"We compare two models of how we think about data to inform our visualization process. Left shows an abstracted data set with the areas with no data blanked out in grey. This model has many voids, but all within the existing data structure. On the right, a tiny speck of white is in a void. This speck indicates all the data that is collected in what we perceive to be an infinite field of all the data that could be collected. We use this second model to think about new possibilities in data visualization practices.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop8","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation","session_uid":"w-future","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation"],"time_stamp":"2024-10-14T16:00:00Z","title":"(Almost) All Data is Absent Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-future-1011","abstract":"This study explores energy issues across various nations, focusing on sustainable energy availability and accessibility. Representatives from all continents were selected based on their HDI values. Data from Kaggle, spanning 2000-2020, was analyzed using Python to address questions on electricity access, renewable energy generation, and fossil fuel consumption. The research employed statistical and data visualization techniques to reveal trends and disparities. Findings underscore the importance of Python and Kaggle in data analysis. 
The study suggests expanding datasets and incorporating predictive modeling for future research to enhance understanding and decision-making in energy policies.","accessible_pdf":false,"authors":[{"affiliations":["Faculdade Nova Roma, Recife, Brazil"],"email":"gustavodssilva456@gmail.com","is_corresponding":true,"name":"Gustavo Santos Silva"},{"affiliations":["Faculdade Nova Roma, Recife, Brazil"],"email":"lartur671@gmail.com","is_corresponding":false,"name":"Artur Vin\u00edcius Lima Silva"},{"affiliations":["Faculdade Nova Roma, Recife, Brazil"],"email":"lpsouza612@gmail.com","is_corresponding":false,"name":"Lucas Pereira Souza"},{"affiliations":["Faculdade Nova Roma, Recife, Brazil"],"email":"adrianlauzid@gmail.com","is_corresponding":false,"name":"Adrian Lauzid"},{"affiliations":["Universidade Federal de Pernambuco, Recife, Brazil"],"email":"djmm@cin.ufpe.br","is_corresponding":false,"name":"Davi Maia"}],"award":"","doi":"","event_id":"w-future","event_title":"VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-future-1011","image_caption":"This study uses Python and open data from Kaggle to visualize renewable energy generation and fossil fuel consumption from 2000-2020 across diverse nations. The research reveals global trends, disparities in energy access, and the role of data in driving sustainable energy solutions. Our findings contribute to shaping energy policy and decision-making for a more sustainable future.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop8","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation","session_uid":"w-future","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation"],"time_stamp":"2024-10-14T16:00:00Z","title":"Renewable Energy Data Visualization: A study with Open Data","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-future-1012","abstract":"Information visualization holds significant potential to support sustainability goals such as environmental stewardship, and climate resilience by transforming complex data into accessible visual formats that enhance public understanding of complex climate change data and drive actionable insights. While the field has predominantly focused on analytical orientation of visualization, challenging traditional visualization techniques and goals, through ``critical visualization'' research expands existing assumptions and conventions in the field. In this paper, I explore how reimagining overlooked aspects of data visualization\u2014such as engagement, emotional resonance, communication, and community empowerment\u2014can contribute to achieving sustainability objectives. 
I argue that by focusing on inclusive data visualization that promotes clarity, understandability, and public participation, we can make complex data more relatable and actionable, fostering broader connections and mobilizing collective action on critical issues like climate change. Moreover, I discuss the role of emotional receptivity in environmental data communication, stressing the need for visualizations that respect diverse cultural perspectives and emotional responses to achieve impactful outcomes. Drawing on insights from a decade of research in public participation and community engagement, I aim to highlight how data visualization can democratize data access and increase public involvement in order to contribute to a more sustainable and resilient future.","accessible_pdf":false,"authors":[{"affiliations":["University of Massachusetts Amherst, Amherst, United States"],"email":"nmahyar@cs.umass.edu","is_corresponding":true,"name":"Narges Mahyar"}],"award":"","doi":"","event_id":"w-future","event_title":"VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-future-1012","image_caption":"This figure represents the paper's key points, including (1) a review of emerging visualization theories that prioritize community engagement and social aspects, (2) dimensions for fostering community engagement, and (3) leveraging insights from fields such as public participation, participatory design, and communication studies to inform new theory development. ","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop8","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation","session_uid":"w-future","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation"],"time_stamp":"2024-10-14T16:00:00Z","title":"Reimagining Data Visualization to Address Sustainability Goals","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-future-1013","abstract":"This position paper discusses the role of data visualizations in journalism based on new areas of study such as visual journalism and data journalism, using examples from the coverage of the catastrophe that occurred in 2024 in Rio Grande do Sul, Brazil, affecting over 2 million people. This case served as a warning to the country about the importance of the climate change agenda and its consequences. 
The paper includes a literature review in the fields of journalism, data visualization, and psychology to explore the importance of data visualization in combating misinformation and in producing more reliable journalism as a tool for fighting climate change.","accessible_pdf":false,"authors":[{"affiliations":["Universidade Federal de Pernambuco, Recife, Brazil"],"email":"emilly.brito@ufpe.br","is_corresponding":true,"name":"Emilly Brito"},{"affiliations":["Universidade Federal de Pernambuco, Recife, Brazil"],"email":"nivan@cin.ufpe.br","is_corresponding":false,"name":"Nivan Ferreira"}],"award":"","doi":"","event_id":"w-future","event_title":"VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-future-1013","image_caption":"Data visualization example produced in the Brazilian Media about the catastrophe in Rio Grande do Sul.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop8","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation","session_uid":"w-future","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["VISions of the Future: Workshop on Sustainable Practices within Visualization and Physicalisation"],"time_stamp":"2024-10-14T16:00:00Z","title":"Visual and Data Journalism as Tools for Fighting Climate Change","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1008","abstract":"Presenting the effects of and effective countermeasures for climate change is a significant challenge in science communication. Data-driven storytelling and narrative visualization can be part of the solution. However, the communication is limited when restricted to global or cross-regional scales, as climate effects are particular to the location and adaptations need to be local. In this work, we focus on data-driven storytelling that communicates local impacts of climate change. We analyze the adoption of data-driven storytelling by local news media in addressing climate-related topics. Further, we investigate the specific characteristics of the local scenario and present three application examples to showcase potential local data-driven stories. Since these examples are rooted in university teaching, we also discuss educational aspects.
Finally, we summarize the interdisciplinary research challenges and opportunities for application associated with data-driven storytelling in a local context.","accessible_pdf":false,"authors":[{"affiliations":["University of Bamberg, Bamberg, Germany"],"email":"fabian.beck@uni-bamberg.de","is_corresponding":true,"name":"Fabian Beck"},{"affiliations":["University of Bamberg, Bamberg, Germany"],"email":"lukas.panzer@uni-bamberg.de","is_corresponding":false,"name":"Lukas Panzer"},{"affiliations":["University of Bamberg, Bamberg, Germany"],"email":"marc.redepenning@uni-bamberg.de","is_corresponding":false,"name":"Marc Redepenning"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-vis4climate-1008","image_caption":"The figure illustrates the characteristics of local climate data stories, focusing on how data-driven storytelling can communicate the effects and mitigation of climate change in a localized context. It shows the relationships between climate change, locality, data, and citizens through key characteristics of the scenario. The characteristic emphasize that specific local relevance, limited scope, local context, and participation are linked with the input data. The data stories support stakeholder engagement through familiarity, interest, concern, participation, and ultimately actionable conclusions for citizens.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Local Climate Data Stories: Data-driven Storytelling to Communicate Effects and Mitigation of Climate Change in a Local Context","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1011","abstract":"Climate change\u2019s global impact calls for coordinated visualization efforts to enhance collaboration and communication among key partners such as domain experts, community members, and policy makers. We present a collaborative initiative, EcoViz, where visualization practitioners and key partners co-designed environmental data visualizations to illustrate impacts on ecosystems and the benefit of informed management and nature-based solutions. Our three use cases rely on unique processing pipelines to represent time-dependent natural phenomena by combining cinematic, scientific, and information visualization methods. Scientific outputs are displayed through narrative data-driven animations, interactive geospatial web applications, and immersive Unreal Engine applications. Each field\u2019s decision-making process is specific, driving design decisions about the best representation and medium for each use case. 
Data-driven cinematic videos with simple charts and minimal annotations proved most effective for engaging large, diverse audiences. This flexible medium facilitates reuse, maintains critical details, and integrates well into broader narrative videos. The need for interdisciplinary visualizations highlights the importance of funding to integrate visualization practitioners throughout the scientific process to better translate data and knowledge into informed policy and practice.","accessible_pdf":false,"authors":[{"affiliations":["University of California, San Diego, San Diego, United States"],"email":"jkb@ucsc.edu","is_corresponding":true,"name":"Jessica Marielle Kendall-Bar"},{"affiliations":["University of California, San Diego, La Jolla, United States"],"email":"inealey@ucsd.edu","is_corresponding":false,"name":"Isaac Nealey"},{"affiliations":["University of California, Santa Cruz, Santa Cruz, United States"],"email":"icostell@ucsc.edu","is_corresponding":false,"name":"Ian Costello"},{"affiliations":["University of California, Santa Cruz, Santa Cruz, United States"],"email":"chlowrie@ucsc.edu","is_corresponding":false,"name":"Christopher Lowrie"},{"affiliations":["University of California, San Diego, San Diego, United States"],"email":"khn009@ucsd.edu","is_corresponding":false,"name":"Kevin Huynh Nguyen"},{"affiliations":["University of California San Diego, La Jolla, United States"],"email":"pponganis@ucsd.edu","is_corresponding":false,"name":"Paul J. Ponganis"},{"affiliations":["University of California, Santa Cruz, Santa Cruz, United States"],"email":"mwbeck@ucsc.edu","is_corresponding":false,"name":"Michael W. Beck"},{"affiliations":["University of California, San Diego, San Diego, United States"],"email":"ialtintas@ucsd.edu","is_corresponding":false,"name":"\u0130lkay Alt\u0131nta\u015f"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-vis4climate-1011","image_caption":"Graphic showing the three use cases for EcoViz, a collaborative initiative to co-design multimodal environmental data visualizations. Above, we show an immersive Unreal Engine visualization of a controlled burn simulation to manage wildfire. Below, we show a photo-realistic rendering of hydrodynamic model outputs regarding the flood protection benefits of coral reefs. The circular graphic in the center shows thousands of autonomous profiling Argo floats that survey changes in temperature and salinity to track heat accumulation in the ocean. 
","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-vis4climate/w-vis4climate-1011/w-vis4climate-1011_Preview.mp4?token=XM88vdwuEwLEUhn8-gWWbE6qIZuzh0fLBtKc3HP1dLg&expires=1730433600","session_bunny_ff_subtitles":"https://ieeevis-uploads.b-cdn.net/vis24/w-vis4climate/w-vis4climate-1011/w-vis4climate-1011_Preview.srt?token=6p9mlLEDy2QDkEGnKm78tvINdyMMpm4ZxOhMHEE59h4&expires=1730433600","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"uX7XpQo2VGs","session_youtube_ff_link":"https://youtu.be/uX7XpQo2VGs","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"EcoViz: an iterative methodology for designing multifaceted data-driven environmental visualizations that communicate ecosystem impacts and envision nature-based solutions","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1018","abstract":"Household consumption significantly impacts climate change. Al- though interventions that make households aware of their consump- tion exist, tailoring the design to each home\u2019s needs remains chal- lenging. To address this, we developed Eco-Garden, a data sculp- ture designed to visualise household consumption aiming to pro- mote sustainable practices. Eco-Garden serves as both an aesthetic piece for visitors and a functional tool for household members to understand their resource consumption. In this paper, we present the human-centred design process of Eco-Garden and the prelim- inary findings we made through the field study. We conducted a field study with 15 households to explore participants\u2019 experience with Eco-Garden and its potential to encourage sustainable prac- tices at home. Our participants provided positive feedback on inte- grating Eco-Garden into their homes, highlighting considerations such as aesthetics, physicality, calm manner of presenting con- sumption data. 
Our insights contribute to developing data sculptures for households that can facilitate meaningful interactions with consumption data.","accessible_pdf":false,"authors":[{"affiliations":["Cardiff University, UK, Cardiff, United Kingdom"],"email":"pereraud@cardiff.ac.uk","is_corresponding":true,"name":"Dushani Ushettige"},{"affiliations":["Cardiff University, Cardiff, United Kingdom"],"email":"verdezotodiasn@cardiff.ac.uk","is_corresponding":false,"name":"Nervo Verdezoto"},{"affiliations":["Cardiff University, Cardiff, United Kingdom"],"email":"lannon@cardiff.ac.uk","is_corresponding":false,"name":"Simon Lannon"},{"affiliations":["Cardiff University, Cardiff, United Kingdom"],"email":"gwilliamja@cardiff.ac.uk","is_corresponding":false,"name":"Jullie Gwilliam"},{"affiliations":["Cardiff University, Cardiff, United Kingdom"],"email":"eslambolchilarp@cardiff.ac.uk","is_corresponding":false,"name":"Parisa Eslambolchilar"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-vis4climate-1018","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Eco-Garden: A Data Sculpture to Encourage Sustainable Practices in Everyday Life in Households","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1023","abstract":"Consumers have the potential to play a large role in mitigating the climate crisis by taking on more pro-environmental behavior, for example by making more sustainable food choices. However, while environmental awareness is common among consumers, it is not always clear what the current impact of one's own food choices is, and consequently it is not always clear how or why their own behavior must change, or how important the change is. Immersive technologies have been shown to aid in these aspects. In this paper, we bring food production into the home by means of handheld augmented reality. Using the current prototype, users can input which ingredients are in their meal on their smartphone, and after making a 3D scan of their kitchen, plants, livestock, feed, and water required for all are visualized in front of them.
In this paper, we describe the design of the current prototype and, by analyzing the current state of research on virtual and augmented reality for sustainability research, we describe in which ways the application could be extended in terms of data, models, and interaction, to investigate the most prominent issues within environmental sustainability communications research.","accessible_pdf":true,"authors":[{"affiliations":["Wageningen University and Research, Wageningen, Netherlands"],"email":"nina.rosa-dejong@wur.nl","is_corresponding":true,"name":"Nina Rosa"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-vis4climate-1023","image_caption":"Screenshots of the handheld augmented reality AwARe prototype, showing an ingredients list for a simple meat-centered meal, and crops, water and livestock required for the meat-centered meal, visualized in a kitchen and dining room.","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"https://nerosa.nl/wp-content/uploads/2024/08/Viz4CandS_AwARe___author_version_for_open_access.pdf","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"AwARe: Using handheld augmented reality for researching the potential of food resource information visualization","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1024","abstract":"This paper details the development and implementation of a collaborative exhibit at Boston\u2019s Museum of Science showcasing interactive data visualizations designed to educate the public on global sustainability and urban environmental concerns. Supported by cross-institutional collaboration, the exhibit provided a rich real-world learning opportunity for students, resulting in a set of public-facing educational resources that informed visitors of global sustainability concerns through the lens of a local municipality. The realization of this project was made possible only by a close collaboration between a municipality, science museum and academic partners, all of whom committed their expertise and resources at both leadership and implementation team levels. This initiative highlights the value of cross-institutional collaboration to ignite the transformative potential of interactive visualizations in driving public engagement with local and global sustainability issues.
Focusing on promoting sustainability and enhancing community well-being, this initiative highlights the potential of cross-institutional collaboration and locally-relevant interactive data visualizations to educate, inspire action, and foster community engagement in addressing climate change and urban sustainability.","accessible_pdf":false,"authors":[{"affiliations":["Brown University, Providence, United States","Rhode Island School of Design, Providence, United States"],"email":"bae@brown.edu","is_corresponding":true,"name":"Beth Altringer Eagle"},{"affiliations":["Harvard University, Cambridge, United States"],"email":"sylvan@media.mit.edu","is_corresponding":false,"name":"Elisabeth Sylvan"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":true,"has_pdf":"","id":"w-vis4climate-1024","image_caption":"Four examples of interactive data visualizations created by students at Harvard, Brown and RISD using open data from the city of Boston and presented at the Museum of Science","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"https://ieeevis-uploads.b-cdn.net/vis24/w-vis4climate/w-vis4climate-1024/w-vis4climate-1024_Preview.mp4?token=aQj_tjR4bHboHrvd75dSimXws-mWAlFpe0BQ2-hucto&expires=1730433600","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"qZBMOrWz8hI","session_youtube_ff_link":"https://youtu.be/qZBMOrWz8hI","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Cultivating Climate Action Through Multi-Institutional Collaboration: Innovative Data Visualization Educational Programs and Exhibits for Public Engagement","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1010","abstract":"The urgency of climate change is now recognized globally. As humanity confronts the critical need to mitigate climate change and foster sustainability, data visualization emerges as a powerful tool with a unique capacity to communicate insights crucial for understanding environmental complexities. This paper explores the critical need for designing and investigating responsible data visualization that can act as a catalyst for engaging communities within global climate action and sustainability efforts. Grounded in prior work and reflecting on a decade of community engagement research, I propose five critical considerations: (1) inclusive and accessible visualizations for enhancing climate education and communication, (2) interactive visualizations for fostering agency and deepening engagement, (3) in-situ visualizations for reducing spatial indirection, (4) shared immersive experiences for catalyzing collective action, and (5) accurate, transparent, and credible visualizations for ensuring trust and integrity. 
These considerations offer strategies and new directions for visualization research, aiming to enhance community engagement, deepen involvement, and foster collective action on critical socio-technical issues, including and beyond climate change.","accessible_pdf":false,"authors":[{"affiliations":["University of Massachusetts Amherst, Amherst, United States"],"email":"narges.mahyar@gmail.com","is_corresponding":true,"name":"Narges Mahyar"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-vis4climate-1010","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Harnessing Visualization for Climate Action and Sustainable Future","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1020","abstract":"Satellite Earth Observation (EO) data is essential for tracking climate change trends and their impacts on ecosystems; however, conventional methods of presenting EO data often fail to effectively communicate the intricate relationships between climate causes and effects in hyperlocal contexts. To address this challenge, this paper investigates the use of advanced data visualization techniques, focusing on the potential of Augmented Reality (AR) and Virtual Reality (VR) to enhance EO data understanding and climate storytelling. Leveraging the MIT Media Lab's Earth Mission Control (EMC) AR/VR platform, the paper details how immersive VR environments can simplify complex climate data narratives, and enhance the ability of decision-makers to analyze, interact with, and understand EO data. The paper presents the architecture of EMC\u2019s platform, including key design features such as: information dashboard carousel; map table; globe; and dynamic scenic VR environments.
User feedback from diverse stakeholders reveals significant improvements in climate communication and decision-making, emphasizing the capability of leveraging immersive technologies to address global climate challenges.","accessible_pdf":false,"authors":[{"affiliations":["MIT Media Lab, Cambridge, MA, United States"],"email":"minoo@media.mit.edu","is_corresponding":false,"name":"Minoo Rathnasabapathy"},{"affiliations":["MIT Media Lab, Cambridge, United States"],"email":"dnewman@mit.edu","is_corresponding":false,"name":"Dava Newman"},{"affiliations":["MIT Media Lab, Cambridge, United States"],"email":"rachelbc@media.mit.edu","is_corresponding":true,"name":"Rachel Connolly"},{"affiliations":["MIT Media Lab, Cambridge, United States"],"email":"pcherner@mit.edu","is_corresponding":false,"name":"Phillip Cherner"},{"affiliations":["MIT Media Lab, Cambridge, United States"],"email":"palmjad2@mit.edu","is_corresponding":false,"name":"Jaden Palmer"},{"affiliations":["NASA Goddard Space Flight Center, Greenbelt, United States"],"email":"mark.u.subbarao@nasa.gov","is_corresponding":false,"name":"Mark SubbaRao"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-vis4climate-1020","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Earth Mission Control: Advanced Data Visualizations for Climate Intelligence","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1029","abstract":"Artists have been speaking to, and creating paths for reflection on, fundamental threats to society and our lives as far back as we can document. Our changing climate is one such threat demanding meaningful narratives. In this short paper, we present the work of six internationally recognized artists addressing climate change, along with an analysis of their common work threads, toward the goal of promoting adoption of some of the \"tools\" in their toolkit. 
By doing so, we hope we can assist the visualization community in creating content that moves beyond intellectual understanding toward emotional adoption and thus action.","accessible_pdf":false,"authors":[{"affiliations":["University of Texas at Austin, Austin, United States"],"email":"fsamsel@tacc.utexas.edu","is_corresponding":true,"name":"Francesca Samsel"},{"affiliations":["Rhode Island School of Design, Providence, United States"],"email":"bcampbel01@risd.edu","is_corresponding":false,"name":"Bruce Donald Campbell"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-vis4climate-1029","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Artists, Data and Climate Change: Distilled messages, multiple entry points, layered metaphor","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1033","abstract":"Freshwater floods during hurricanes are known to cause significant damage to life and property. We could be better prepared to prevent these losses if flood forecasts can be made accurately and understood effectively. In addition to the technical complexities when modeling freshwater systems, forecasting freshwater floods also involves numerous uncertainties that need to be considered to make reliable data-driven decisions.
In this demo, we describe the design and implementation of HydroVis\u2013a decision support system designed to help both weather scientists to triage the flood forecasting models, and the policymakers to help them understand the forecasts effectively and make informed decisions accordingly.","accessible_pdf":false,"authors":[{"affiliations":["University of Washington, Seattle, United States"],"email":"ameyap2@cs.washington.edu","is_corresponding":true,"name":"Ameya B Patil"},{"affiliations":["National Center for Atmospheric Research, Boulder, United States"],"email":"masmith@ucar.edu","is_corresponding":false,"name":"Marlee Smith"},{"affiliations":["National Center for Atmospheric Research, Boulder, United States"],"email":"hkershaw@ucar.edu","is_corresponding":false,"name":"Helen Kershaw"},{"affiliations":["National Center for Atmospheric Research, Boulder, United States"],"email":"gharamti@ucar.edu","is_corresponding":false,"name":"Moha El Gharamti"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-vis4climate-1033","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Interactive Visualization of Ensemble Data Assimilation Forecasts for Freshwater Floods","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1034","abstract":"While there is a well-known gap between what the general public and policymakers understand about science and what is known by experts, this gap is particularly perilous in regard to climate change. Currently, scientists inform each other via expert publications and conferences. We, as part of the public and policymakers, receive our information via the media and the web \u2013 and in our current catastrophic blending of information with misinformation, we are at risk of well-intentionally taking ineffective or even harmful actions and decisions. To close this gap, a team of experts in data visualization, narrative construction, data comics, and climate change work collaboratively to develop climate change data comics that combine compelling narratives with comprehensible data visuals that are informed and verified by the appropriate scientists. 
This pictorial outlines our approach and provides two examples, emphasizing the integration of storytelling, scientific explanation, and data visualization through expressive visual presentations.","accessible_pdf":false,"authors":[{"affiliations":["Simon Fraser University, Vancouver, Canada"],"email":"wangzezhong2016@gmail.com","is_corresponding":true,"name":"Zezhong Wang"},{"affiliations":["Carleton University, Ottawa, Canada"],"email":"stephan.gruber@carleton.ca","is_corresponding":false,"name":"Stephan Gruber"},{"affiliations":["University of Manitoba, Winnipeg, Canada"],"email":"claire.herbert@umanitoba.ca","is_corresponding":false,"name":"Claire Herbert"},{"affiliations":["Simon Fraser University, Vancouver, Canada"],"email":"zandria_sarrazin@sfu.ca","is_corresponding":false,"name":"Zandria Sarrazin"},{"affiliations":["SFU, Burnaby, Canada"],"email":"mnl@sfu.ca","is_corresponding":false,"name":"Michelle Levy"},{"affiliations":["Simon Fraser University, Burnaby, Canada"],"email":"sheelagh@sfu.ca","is_corresponding":false,"name":"Sheelagh Carpendale"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-vis4climate-1034","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Data Comics for Climate Change","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1038","abstract":"The impacts of climate change are intensifying existing vulnerabilities and disparities within urban communities around the globe, as extreme weather events, including floods and heatwaves, are becoming more frequent and severe, disproportionately affecting low-income and underrepresented groups. Tackling these increasing challenges requires novel approaches that integrate expertise across multiple domains, including computer science, engineering, climate science, and public health. Urban computing can play a pivotal role in these efforts by integrating data from multiple sources to support decision-making and provide actionable insights into weather patterns, infrastructure weaknesses, and population vulnerabilities. However, the capacity to leverage technological advancements varies significantly between the Global South and Global North. In this paper, we present two multiyear, multidisciplinary projects situated in Chicago, USA and Niter\u00f3i, Brazil, highlighting the opportunities and limitations of urban computing in these diverse contexts. 
Reflecting on our experiences, we then discuss the essential requirements, as well as existing gaps, for visual analytics tools that facilitate the understanding and mitigation of climate-related risks in urban environments.","accessible_pdf":false,"authors":[{"affiliations":["University of Illinois, Chicago, United States"],"email":"carolvfs@illinois.edu","is_corresponding":false,"name":"Carolina Veiga"},{"affiliations":["University of Illinois, Chicago, United States"],"email":"sharmaa@illinois.edu","is_corresponding":false,"name":"Ashish Sharma"},{"affiliations":["Universidade Federal Fluminense, Niter\u00f3i, Brazil"],"email":"danielcmo@ic.uff.br","is_corresponding":false,"name":"Daniel de Oliveira"},{"affiliations":["Universidade Federal Fluminense , Niteroi, Brazil"],"email":"mlage@ic.uff.br","is_corresponding":false,"name":"Marcos Lage"},{"affiliations":["University of Illinois Chicago, Chicago, United States"],"email":"fabiom@uic.edu","is_corresponding":true,"name":"Fabio Miranda"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-vis4climate-1038","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Urban Computing for Climate And Environmental Justice: Early Perspectives From Two Research Initiatives","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1039","abstract":"This position statement discusses the challenges of designing visualizations to enhance the carbon numeracy of the general public. Carbon numeracy refers to an individual's quantitative awareness of their CO2 emissions, which can vary widely from grams to tons across different activities. Effective visualizations must accurately represent these ranges and facilitate quantitative comparisons. By leveraging insights from both visualization research and cognitive psychology on numerical perception and the representation of large numbers, we propose two novel design solutions to address these challenges. 
We aim to foster discussions on improving public carbon numeracy, ultimately aiding in mitigating climate change.","accessible_pdf":false,"authors":[{"affiliations":["Berger-Levrault, Boulogne-Billancourt, France","Inria, Saclay, France"],"email":"kbatziakoudi@gmail.com","is_corresponding":true,"name":"Katerina Batziakoudi"},{"affiliations":["Aviz, Inria, Saclay, France","LISN, Universit\u00e9 Paris-Saclay, CNRS, Orsay, France"],"email":"florent.cabric.pro@gmail.com","is_corresponding":false,"name":"Florent Cabric"},{"affiliations":["Berger-Levrault, Toulouse, France"],"email":"stephanie.rey@berger-levrault.com","is_corresponding":false,"name":"St\u00e9phanie Rey"},{"affiliations":["Inria, Saclay, France","Universit\u00e9 Paris-Saclay, CNRS, Orsay, France"],"email":"jean-daniel.fekete@inria.fr","is_corresponding":false,"name":"Jean-Daniel Fekete"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-vis4climate-1039","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Designing Visualizations for Enhancing Carbon Numeracy","youtube_ff_id":null,"youtube_ff_url":null},{"UID":"w-vis4climate-1040","abstract":"The Intergovernmental Panel on Climate Change (IPCC) plays a pivotal role in assessing and communicating climate science through its comprehensive reports. Despite the IPCC's efforts to provide source code and data for report figures, reproducing these figures is still challenging. This paper details our approach and the obstacles encountered in creating reproducible visualizations from the IPCC Working Group 1 data. Our work involved developing a set of front-end GitHub repositories that build upon the IPCC's original resources, incorporating reproducibility instructions and scripts to closely replicate the report\u2019s figures. 
By providing reproducible figures, we aim to enhance public engagement and contribution to climate change communication, ensuring accuracy and facilitating iterative improvements in figure presentation.","accessible_pdf":false,"authors":[{"affiliations":["Zhejiang University, Hangzhou, China","INRIA, Saclay, France"],"email":"yingluu@zju.edu.cn","is_corresponding":true,"name":"Lu Ying"},{"affiliations":["Zhejiang University, Hangzhou, China"],"email":"ycwu@zju.edu.cn","is_corresponding":false,"name":"Yingcai Wu"},{"affiliations":["Inria, Saclay, France"],"email":"jean-daniel.fekete@inria.fr","is_corresponding":false,"name":"Jean-Daniel Fekete"}],"award":"","doi":"","event_id":"w-vis4climate","event_title":"Visualization for Climate Action and Sustainability","external_paper_link":"","fno":"","has_fno":false,"has_image":false,"has_pdf":"","id":"w-vis4climate-1040","image_caption":"","keywords":[],"open_access_supplemental_link":"","open_access_supplemental_question":"","paper_type":"workshop","paper_type_color":"#f4a261","paper_type_name":"Workshop","preprint_link":"","prerecorded_video_id":null,"prerecorded_video_link":"","session_bunny_ff_link":"","session_bunny_ff_subtitles":"","session_bunny_prerecorded_link":null,"session_bunny_prerecorded_subtitles":null,"session_id":"workshop9","session_room":"Esplanade Suites I + II + III","session_room_id":"esplanadesuites","session_title":"Visualization for Climate Action and Sustainability","session_uid":"w-vis4climate","session_youtube_ff_id":"","session_youtube_ff_link":"","session_youtube_prerecorded_id":null,"session_youtube_prerecorded_link":"","sessions":["Visualization for Climate Action and Sustainability"],"time_stamp":"2024-10-14T12:30:00Z","title":"Exploring the Reproducibility for Visualization Figures in Climate Change Report","youtube_ff_id":null,"youtube_ff_url":null}]
diff --git a/src/index.js b/src/index.js
index 482e174cd..44fb6c794 100644
--- a/src/index.js
+++ b/src/index.js
@@ -7,7 +7,8 @@ import { createAuth0Client } from "@auth0/auth0-spa-js";
 
 
 const updateUI = async (auth0, query) => {
-  const is_auth = await auth0.isAuthenticated();
+  // const is_auth = await auth0.isAuthenticated();
+  const is_auth = true;
   //console.log("are we auth?", is_auth)
   if (is_auth) {
     document.body.style.display = null;
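
For context, the src/index.js hunk above hard-codes the authentication flag instead of awaiting auth0.isAuthenticated(), so updateUI always treats the visitor as authenticated and reveals the page body without a login. A minimal sketch of the resulting behavior follows; only the lines visible in the hunk are taken from the patch, and the surrounding structure (the hidden-by-default body, the closing of the function) is assumed for illustration.

    // Illustrative sketch of updateUI after this patch; structure outside the
    // visible hunk lines is assumed, not taken from the repository.
    const updateUI = async (auth0, query) => {
      // Previously: const is_auth = await auth0.isAuthenticated();
      // The patch short-circuits the Auth0 check, so every visitor is treated
      // as authenticated.
      const is_auth = true;
      if (is_auth) {
        // Reveal the page body, which is assumed to start out hidden until the
        // auth check resolves.
        document.body.style.display = null;
      }
    };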