Publications

2024

J. Janeiro, S. Alves, T. Guerreiro, V. Distler, and F. Alt. Understanding phishing experiences of screen reader users. IEEE Security & Privacy, 2024.
[BibTeX] [PDF]
@Article{janeiro2024ieeesp,
author = {Joao Janeiro AND Sergio Alves AND Tiago Guerreiro AND Verena Distler AND Florian Alt},
journal = {IEEE Security \& Privacy},
title = {Understanding Phishing Experiences of Screen Reader Users},
year = {2024},
month = dec,
note = {janeiro2024ieeesp},
timestamp = {2024.12.09},
url = {http://www.florian-alt.org/unibw/wp-content/publications/janeiro2024ieeesp.pdf},
}
V. Paneva, M. Strauss, V. Winterhalter, S. Schneegass, and F. Alt. Privacy in the metaverse. IEEE Pervasive Computing, vol. 23, iss. 4, p. 5, 2024. doi:10.1109/MPRV.2024.3432953
[BibTeX] [PDF]
@Article{paneva2024ieeepvc,
author = {Viktorija Paneva AND Marvin Strauss AND Verena Winterhalter AND Stefan Schneegass AND Florian Alt},
journal = {IEEE Pervasive Computing},
title = {Privacy in the Metaverse},
year = {2024},
month = dec,
note = {paneva2024ieeepvc},
number = {4},
pages = {5},
volume = {23},
doi = {10.1109/MPRV.2024.3432953},
timestamp = {2024.12.09},
url = {http://www.florian-alt.org/unibw/wp-content/publications/paneva2024ieeepvc.pdf},
}
F. Dietz, L. Mecke, D. Riesner, and F. Alt. Delusio – Plausible Deniability For Face Recognition. Proceedings of the ACM on Human-Computer Interaction, vol. 8, iss. MHCI, 2024. doi:10.1145/3676494
[BibTeX] [Abstract] [PDF]
We developed an Android phone unlock mechanism utilizing facial recognition and specific mimics to access a specially secured portion of the device, designed for plausible deniability. The widespread adoption of biometric authentication methods, such as fingerprint and facial recognition, has revolutionized mobile device security, offering enhanced protection against shoulder-surfing attacks and improving user convenience compared to traditional passwords. However, a downside is the potential for coercion by third parties to unlock the device. While text-based authentication allows users to reveal a hidden system by entering a special password, this is challenging with face authentication. We evaluated our approach in a role-playing user study involving 50 participants, with one participant acting as the attacker and the other as the suspect. Suspects successfully accessed the secured area; mostly without detection. They further expressed interest in this feature on their personal phones. We also discuss open challenges and opportunities in implementing such authentication mechanisms.
@Article{dietz2024mobilehci,
abstract = {We developed an Android phone unlock mechanism utilizing facial recognition and specific mimics to access a specially secured portion of the device, designed for plausible deniability. The widespread adoption of biometric authentication methods, such as fingerprint and facial recognition, has revolutionized mobile device security, offering enhanced protection against shoulder-surfing attacks and improving user convenience compared to traditional passwords. However, a downside is the potential for coercion by third parties to unlock the device. While text-based authentication allows users to reveal a hidden system by entering a special password, this is challenging with face authentication. We evaluated our approach in a role-playing user study involving 50 participants, with one participant acting as the attacker and the other as the suspect. Suspects successfully accessed the secured area; mostly without detection. They further expressed interest in this feature on their personal phones. We also discuss open challenges and opportunities in implementing such authentication mechanisms.},
address = {New York, NY, USA},
articleno = {249},
author = {Felix Dietz AND Lukas Mecke AND Daniel Riesner AND Florian Alt},
doi = {10.1145/3676494},
issue_date = {September 2024},
journal = {Proceedings of the ACM on Human-Computer Interaction},
keywords = {biometrics, facial authentication, plausible deniability},
month = sep,
note = {dietz2024mobilehci},
number = {MHCI},
numpages = {13},
publisher = {Association for Computing Machinery},
timestamp = {2024.10.27},
title = {{Delusio - Plausible Deniability For Face Recognition}},
url = {http://www.florian-alt.org/unibw/wp-content/publications/dietz2024mobilehci.pdf},
volume = {8},
year = {2024},
}
S. Prange, P. Knierim, G. Knoll, F. Dietz, A. De Luca, and F. Alt. “I do (not) need that Feature!” – Understanding Users’ Awareness and Control of Privacy Permissions on Android Smartphones. In Twentieth Symposium on Usable Privacy and Security (SOUPS’24), USENIX, Philadelphia, PA, USA, 2024.
[BibTeX] [Abstract] [PDF]
We present the results of the first field study (N = 132) investigating users’ (1) awareness of Android privacy permissions granted to installed apps and (2) control behavior over these permissions. Our research is motivated by many smartphone features and apps requiring access to personal data. While Android provides privacy permission management mechanisms to control access to this data, its usage is not yet well understood. To this end, we built and deployed an Android application on participants’ smartphones, acquiring data on actual privacy permission states of installed apps, monitoring permission changes, and assessing reasons for changes using experience sampling. The results of our study show that users often conduct multiple revocations in short time frames, and revocations primarily affect rarely used apps or permissions non-essential for apps’ core functionality. Our findings can inform future (proactive) privacy control mechanisms and help target opportune moments for supporting privacy control.
@InProceedings{prange2024soups,
author = {Sarah Prange AND Pascal Knierim AND Gabriel Knoll AND Felix Dietz AND Alexander De Luca AND Florian Alt},
booktitle = {{Twentieth Symposium on Usable Privacy and Security}},
title = {{``I do (not) need that Feature!'' – Understanding Users’ Awareness and Control of Privacy Permissions on Android Smartphones}},
year = {2024},
address = {Philadelphia, PA, USA},
month = aug,
note = {prange2024soups},
publisher = {USENIX},
series = {SOUPS'24},
abstract = {We present the results of the first field study (N = 132) investigating users’ (1) awareness of Android privacy permissions granted to installed apps and (2) control behavior over these permissions. Our research is motivated by many smartphone features and apps requiring access to personal data. While Android provides privacy permission management mechanisms to control access to this data, its usage is not yet well understood. To this end, we built and deployed an Android application on participants’ smartphones, acquiring data on actual privacy permission states of installed apps, monitoring permission changes, and assessing reasons for changes using experience sampling. The results of our study show that users often conduct multiple revocations in short time frames, and revocations primarily affect rarely used apps or permissions non-essential for apps’ core functionality. Our findings can inform future (proactive) privacy control mechanisms and help target opportune moments for supporting privacy control.},
timestamp = {2024.08.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/prange2024soups.pdf},
}
E. Bouquet, S. Von Der Au, C. Schneegass, and F. Alt. CoAR-TV: design and evaluation of asynchronous collaboration in AR-supported TV experiences. In Proceedings of the 2024 ACM International Conference on Interactive Media Experiences (IMX ’24), Association for Computing Machinery, New York, NY, USA, 2024, p. 231–245. doi:10.1145/3639701.3656320
[BibTeX] [Abstract] [PDF]
Television has long since been a uni-directional medium. However, when TV is used for educational purposes, like in edutainment shows, interactivity could enhance the learning benefit for the viewer. In recent years, AR has been increasingly explored in HCI research to enable interaction among viewers as well as viewers and hosts. Yet, how to implement this collaborative AR (CoAR) experience remains an open research question. This paper explores four approaches to asynchronous collaboration based on the Cognitive Apprenticeship Model: scaffolding, coaching, modeling, and collaborating. We developed a pilot show for a fictional edutainment series and evaluated the concept with two TV experts. In a wizard-of-oz study, we test our AR prototype with eight users and evaluate the perception of the four collaboration styles. The AR-enhanced edutainment concept was well-received by the participants, and the coaching collaboration style was perceived as favorable and could possibly be combined with the modeling style.
@InProceedings{bouquet2024imx,
author = {Bouquet, Elizabeth and Von Der Au, Simon and Schneegass, Christina and Alt, Florian},
booktitle = {Proceedings of the 2024 ACM International Conference on Interactive Media Experiences},
title = {CoAR-TV: Design and Evaluation of Asynchronous Collaboration in AR-Supported TV Experiences},
year = {2024},
address = {New York, NY, USA},
note = {bouquet2024imx},
pages = {231–245},
publisher = {Association for Computing Machinery},
series = {IMX '24},
abstract = {Television has long since been a uni-directional medium. However, when TV is used for educational purposes, like in edutainment shows, interactivity could enhance the learning benefit for the viewer. In recent years, AR has been increasingly explored in HCI research to enable interaction among viewers as well as viewers and hosts. Yet, how to implement this collaborative AR (CoAR) experience remains an open research question. This paper explores four approaches to asynchronous collaboration based on the Cognitive Apprenticeship Model: scaffolding, coaching, modeling, and collaborating. We developed a pilot show for a fictional edutainment series and evaluated the concept with two TV experts. In a wizard-of-oz study, we test our AR prototype with eight users and evaluate the perception of the four collaboration styles. The AR-enhanced edutainment concept was well-received by the participants, and the coaching collaboration style was perceived as favorable and could possibly be combined with the modeling style.},
doi = {10.1145/3639701.3656320},
isbn = {9798400705038},
keywords = {AR, collaboration, edutainment, interactive television, mobile},
location = {Stockholm, Sweden},
numpages = {15},
timestamp = {2024.06.10},
url = {http://florian-alt.org/unibw/wp-content/publications/bouquet2024imx.pdf},
}
K. Marky, A. Stöver, S. Prange, K. Bleck, P. Gerber, V. Zimmermann, F. Müller, F. Alt, and M. Mühlhäuser. Decide Yourself or Delegate – User Preferences Regarding the Autonomy of Personal Privacy Assistants in Private IoT-Equipped Environments. In Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems (CHI ’24), Association for Computing Machinery, New York, NY, USA, 2024. doi:10.1145/3613904.3642591
[BibTeX] [Abstract] [PDF] [Video]
Personalized privacy assistants (PPAs) communicate privacy-related decisions of their users to Internet of Things (IoT) devices. There are different ways to implement PPAs by varying the degree of autonomy or decision model. This paper investigates user perceptions of PPA autonomy models and privacy profiles – archetypes of individual privacy needs – as a basis for PPA decisions in private environments (e.g., a friend’s home). We first explore how privacy profiles can be assigned to users and propose an assignment method. Next, we investigate user perceptions in 18 usage scenarios with varying contexts, data types and number of decisions in a study with 1126 participants. We found considerable differences between the profiles in settings with few decisions. If the number of decisions gets high (> 1/h), participants exclusively preferred fully autonomous PPAs. Finally, we discuss implications and recommendations for designing scalable PPAs that serve as privacy interfaces for future IoT devices.
@InProceedings{marky2024chi,
author = {Karola Marky AND Alina Stöver AND Sarah Prange AND Kira Bleck AND Paul Gerber AND Verena Zimmermann AND Florian Müller AND Florian Alt AND Max Mühlhäuser},
booktitle = {{Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems}},
title = {{Decide Yourself or Delegate - User Preferences Regarding the Autonomy of Personal Privacy Assistants in Private IoT-Equipped Environments}},
year = {2024},
address = {New York, NY, USA},
note = {marky2024chi},
publisher = {Association for Computing Machinery},
series = {CHI ’24},
abstract = {Personalized privacy assistants (PPAs) communicate privacy-related decisions of their users to Internet of Things (IoT) devices. There are different ways to implement PPAs by varying the degree of autonomy or decision model. This paper investigates user perceptions of PPA autonomy models and privacy profiles -- archetypes of individual privacy needs -- as a basis for PPA decisions in private environments (e.g., a friend's home). We first explore how privacy profiles can be assigned to users and propose an assignment method. Next, we investigate user perceptions in 18 usage scenarios with varying contexts, data types and number of decisions in a study with 1126 participants. We found considerable differences between the profiles in settings with few decisions. If the number of decisions gets high ($>$ 1/h), participants exclusively preferred fully autonomous PPAs. Finally, we discuss implications and recommendations for designing scalable PPAs that serve as privacy interfaces for future IoT devices.},
doi = {10.1145/3613904.3642591},
isbn = {979-8-4007-0330-0/24/05},
location = {Honolulu, HI, USA},
timestamp = {2024.05.16},
url = {http://florian-alt.org/unibw/wp-content/publications/marky2024chi.pdf},
video = {marky2024chi},
}
F. Alt, P. Knierim, J. Williamson, and J. Paradiso. The pervasive multiverse. IEEE Pervasive Computing, vol. 23, iss. 1, p. 7–9, 2024. doi:10.1109/MPRV.2024.3385528
[BibTeX] [Abstract] [PDF]
The pervasive multiverse, an interconnected web of diverse and dynamic digital landscapes, promises to redefine our understanding of computing’s reach and impact. Hereby, it extends beyond the traditional boundaries of pervasive computing: digital ecosystems seamlessly intertwine with the physical, creating an immersive and interconnected experience across devices, contexts, and users.
@Article{alt2024ieeepvcsi,
author = {Alt, Florian and Knierim, Pascal and Williamson, Julie and Paradiso, Joe},
journal = {IEEE Pervasive Computing},
title = {The Pervasive Multiverse},
year = {2024},
issn = {1536-1268},
month = jun,
note = {alt2024ieeepvcsi},
number = {1},
pages = {7–9},
volume = {23},
abstract = {The pervasive multiverse, an interconnected web of diverse and dynamic digital landscapes, promises to redefine our understanding of computing's reach and impact. Hereby, it extends beyond the traditional boundaries of pervasive computing: digital ecosystems seamlessly intertwine with the physical, creating an immersive and interconnected experience across devices, contexts, and users.},
address = {USA},
doi = {10.1109/MPRV.2024.3385528},
issue_date = {Jan.-March 2024},
numpages = {3},
publisher = {IEEE Educational Activities Department},
timestamp = {2024.05.16},
url = {https://doi.org/10.1109/MPRV.2024.3385528},
}
S. D. Rodriguez, P. Chatterjee, A. D. Phuong, F. Alt, and K. Marky. Do You Need to Touch? Exploring Correlations between Personal Attributes and Preferences for Tangible Privacy Mechanisms. In Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems (CHI ’24), Association for Computing Machinery, New York, NY, USA, 2024. doi:10.1145/3613904.3642863
[BibTeX] [Abstract] [PDF] [Video]
This paper explores how personal attributes, such as age, gender, technological expertise, or “need for touch”, correlate with people’s preferences for properties of tangible privacy protection mechanisms, for example, physically covering a camera. For this, we conducted an online survey (N = 444) where we captured participants’ preferences of eight established tangible privacy mechanisms well-known in daily life, their perceptions of effective privacy protection, and personal attributes. We found that the attributes that correlated most strongly with participants’ perceptions of the established tangible privacy mechanisms were their “need for touch” and previous experiences with the mechanisms. We use our findings to identify desirable characteristics of tangible mechanisms to better inform future tangible, digital, and mixed privacy protections. We also show which individuals benefit most from tangibles, ultimately motivating a more individual and effective approach to privacy protection in the future.
@InProceedings{delgado2024chi,
author = {Sarah Delgado Rodriguez AND Priyasha Chatterjee AND Anh Dao Phuong AND Florian Alt AND Karola Marky},
booktitle = {{Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems}},
title = {{Do You Need to Touch? Exploring Correlations between Personal Attributes and Preferences for Tangible Privacy Mechanisms}},
year = {2024},
address = {New York, NY, USA},
note = {delgado2024chi},
publisher = {Association for Computing Machinery},
series = {CHI ’24},
abstract = {This paper explores how personal attributes, such as age, gender, technological expertise, or “need for touch”, correlate with people’s preferences for properties of tangible privacy protection mechanisms, for example, physically covering a camera. For this, we conducted an online survey (N = 444) where we captured participants’ preferences of eight established tangible privacy mechanisms well-known in daily life, their perceptions of effective privacy protection, and personal attributes. We found that the attributes that correlated most strongly with participants’ perceptions of the established tangible privacy mechanisms were their “need for touch” and previous experiences with the mechanisms. We use our findings to identify desirable characteristics of tangible mechanisms to better inform future tangible, digital, and mixed privacy protections. We also show which individuals benefit most from tangibles, ultimately motivating a more individual and effective approach to privacy protection in the future.},
doi = {10.1145/3613904.3642863},
isbn = {979-8-4007-0330-0/24/05},
location = {Honolulu, HI, USA},
timestamp = {2024.05.16},
url = {http://florian-alt.org/unibw/wp-content/publications/delgado2024chi.pdf},
video = {delgado2024chi},
}
Y. Abdrabou, T. Omelina, F. Dietz, M. Khamis, F. Alt, and M. Hassib. Where Do You Look When Unlocking Your Phone? A Field Study of Gaze Behaviour During Smartphone Unlock. In Extended Abstracts of the 2024 CHI Conference on Human Factors in Computing Systems (CHI ’24), Association for Computing Machinery, New York, NY, USA, 2024. doi:10.1145/3613905.3651094
[BibTeX] [Abstract] [PDF] [Video]
Eye gaze has emerged as a promising avenue for implicit authentication/identification on smartphones, offering the potential for seamless user identification and two-factor authentication. However, a crucial gap exists in understanding eye gaze behaviour specifically during smartphone unlocks. This lack of understanding is magnified by scenarios where users’ faces are not fully visible in front cameras, leading to inaccurate gaze estimation. In this work, we conducted a 24-hour in-the-wild study tracking 21 users’ eye gaze during smartphone unlocks. Our findings highlight substantial eye gaze behaviour variations influenced by authentication methods, physical activity, and environment. Our findings provide insights to enhance and adapt implicit user identification/authentication systems based on gaze tracking on smartphones taking into consideration different users’ behaviour, and environmental effects.
@InProceedings{abdrabou2024chilbw,
author = {Yasmeen Abdrabou AND Tatiana Omelina AND Felix Dietz AND Mohamed Khamis AND Florian Alt AND Mariam Hassib},
booktitle = {{Extended Abstracts of the 2024 CHI Conference on Human Factors in Computing Systems}},
title = {{Where Do You Look When Unlocking Your Phone? A Field Study of Gaze Behaviour During Smartphone Unlock}},
year = {2024},
address = {New York, NY, USA},
note = {abdrabou2024chilbw},
publisher = {Association for Computing Machinery},
series = {CHI ’24},
abstract = {Eye gaze has emerged as a promising avenue for implicit authentication/identification on smartphones, offering the potential for seamless user identification and two-factor authentication. However, a crucial gap exists in understanding eye gaze behaviour specifically during smartphone unlocks. This lack of understanding is magnified by scenarios where users' faces are not fully visible in front cameras, leading to inaccurate gaze estimation. In this work, we conducted a 24-hour in-the-wild study tracking 21 users' eye gaze during smartphone unlocks. Our findings highlight substantial eye gaze behaviour variations influenced by authentication methods, physical activity, and environment. Our findings provide insights to enhance and adapt implicit user identification/authentication systems based on gaze tracking on smartphones taking into consideration different users' behaviour, and environmental effects.},
doi = {10.1145/3613905.3651094},
isbn = {979-8-4007-0331-7/24/05},
location = {Honolulu, HI, USA},
timestamp = {2024.05.15},
url = {http://florian-alt.org/unibw/wp-content/publications/abdrabou2024chilbw.pdf},
video = {abdrabou2024chilbw},
}
O. Hein, F. Dietz, B. Pfleging, and F. Alt. BikECG – A VR Bicycle Simulator Concept that Integrates Physiological Data and Tele-Cycling. In Proceedings of the CHI 2024 Workshop “CyclingHCI” (CHI EA ’24), Honolulu, HI, USA, 2024.
[BibTeX] [Abstract] [PDF]
Different fitness levels during group cycling tours pose a challenge and have a negative impact on fairness, motivation and inclusivity within the cycling community. To solve this problem, we present a novel, physiology-adaptive Virtual Reality (VR) bike simulator concept. Using a road bike mounted on a Wahoo KICKR Smart Trainer and a Varjo XR-3 HMD, we implemented a VR bicycle simulator using Python and Unity 3D. Real-time ECG data acquisition via a Polar H10 chest strap enhances adaptive capabilities and provides a versatile framework for investigating the dynamic relationship between virtual experiences and physiological responses. This collaborative project, involving two universities, aims to explore tele-cycling and physiologically adaptive scenarios, with a second simulator under construction to expand the possibilities. The integration of real-time physiological monitoring improves the adaptability of the simulation, making it a valuable tool for studying human responses in VR-based cycling scenarios.
@InProceedings{hein2024chiws,
author = {Oliver Hein AND Felix Dietz AND Bastian Pfleging AND Florian Alt},
booktitle = {Proceedings of the CHI 2024 Workshop "CyclingHCI"},
title = {{BikECG - A VR Bicycle Simulator Concept that Integrates Physiological Data and Tele-Cycling}},
year = {2024},
address = {Honolulu, HI, USA},
note = {hein2024chiws},
series = {CHI EA '24},
abstract = {Different fitness levels during group cycling tours pose a challenge and have a negative impact on fairness, motivation and inclusivity within the cycling community. To solve this problem, we present a novel, physiology-adaptive Virtual Reality (VR) bike simulator concept. Using a road bike mounted on a Wahoo KICKR Smart Trainer and a Varjo XR-3 HMD, we implemented a VR bicycle simulator using Python and Unity 3D. Real-time ECG data acquisition via a Polar H10 chest strap enhances adaptive capabilities and provides a versatile framework for investigating the dynamic relationship between virtual experiences and physiological responses. This collaborative project, involving two universities, aims to explore tele-cycling and physiologically adaptive scenarios, with a second simulator under construction to expand the possibilities. The integration of real-time physiological monitoring improves the adaptability of the simulation, making it a valuable tool for studying human responses in VR-based cycling scenarios.},
timestamp = {2024.04.19},
url = {https://www.unibw.de/usable-security-and-privacy/publikationen/pdf/hein2024chiea.pdf},
}
M. Strauss, V. Paneva, F. Alt, and S. Schneegass. Designing and evaluating scalable privacy awareness and control user interfaces. In Proceedings of the CHI 2024 Workshop “Shaping the Future: Developing Principles for Policy Recommendations for Responsible Innovation in Virtual Worlds” (CHI EA ’24), Honolulu, HI, USA, 2024.
[BibTeX] [Abstract] [PDF]
Mixed Reality (MR) headsets hold immense potential for various industries but raise significant privacy concerns due to their data collection capabilities. This paper outlines a research roadmap to address these concerns. Firstly, understanding users’ privacy needs and mental models is crucial for designing effective privacy-preserving user interfaces. Secondly, creating usable privacy control UIs for MR applications is essential to empower users to make informed decisions effortlessly. Thirdly, evaluating the usability and effectiveness of these interfaces is necessary to ensure their efficacy. Finally, establishing real-world testbeds for long-term evaluation of privacy interfaces in users’ everyday lives is crucial. By embedding privacy considerations into MR design and development, this research aims to contribute to a responsible and sustainable XR landscape, where innovation coexists harmoniously with privacy and ethical principles.
@InProceedings{strauss2024chiws,
author = {Marvin Strauss AND Viktorija Paneva AND Florian Alt AND Stefan Schneegass},
booktitle = {Proceedings of the CHI 2024 Workshop "Shaping The Future: Developing Principles for Policy Recommendations for Responsible Innovation in Virtual Worlds"},
title = {Designing and Evaluating Scalable Privacy Awareness and Control User Interfaces},
year = {2024},
address = {Honolulu, HI, USA},
note = {strauss2024chiws},
series = {CHI EA '24},
abstract = {Mixed Reality (MR) headsets hold immense potential for various industries but raise significant privacy concerns due to their data collection capabilities. This paper outlines a research roadmap to address these concerns. Firstly, understanding users' privacy needs and mental models is crucial for designing effective privacy-preserving user interfaces. Secondly, creating usable privacy control UIs for MR applications is essential to empower users to make informed decisions effortlessly. Thirdly, evaluating the usability and effectiveness of these interfaces is necessary to ensure their efficacy. Finally, establishing real-world testbeds for long-term evaluation of privacy interfaces in users' everyday lives is crucial. By embedding privacy considerations into MR design and development, this research aims to contribute to a responsible and sustainable XR landscape, where innovation coexists harmoniously with privacy and ethical principles.},
timestamp = {2024.04.19},
url = {https://www.unibw.de/usable-security-and-privacy/publikationen/pdf/strauss2024chiea.pdf},
}
V. Paneva and F. Alt. Exploring vulnerabilities in remote VR user studies. In Proceedings of the CHI 2024 Workshop “Shaping the Future: Developing Principles for Policy Recommendations for Responsible Innovation in Virtual Worlds” (CHI EA ’24), Honolulu, HI, USA, 2024.
[BibTeX] [Abstract] [PDF]
This position paper explores the possibilities and challenges of using Virtual Reality (VR) in remote user studies. Highlighting the immersive nature of VR, the paper identifies key vulnerabilities, including varying technical proficiency, privacy concerns, ethical considerations, and data security risks. To address these issues, proposed mitigation strategies encompass comprehensive onboarding, prioritized informed consent, implementing privacy-by-design principles, and adherence to ethical guidelines. Secure data handling, including encryption and disposal protocols, is advocated. In conclusion, while remote VR studies present unique opportunities, carefully considering and implementing mitigation strategies is essential to uphold reliability, ethical integrity, and security, ensuring responsible and effective use of VR in user research. Ongoing efforts are crucial for adapting to the evolving landscape of VR technology in user studies.
@InProceedings{paneva2024chiws,
author = {Viktorija Paneva and Florian Alt},
booktitle = {Proceedings of the CHI 2024 Workshop "Shaping The Future: Developing Principles for Policy Recommendations for Responsible Innovation in Virtual Worlds"},
title = {Exploring Vulnerabilities in Remote VR User Studies},
year = {2024},
address = {Honolulu, HI, USA},
note = {paneva2024chiws},
series = {CHI EA '24},
abstract = {This position paper explores the possibilities and challenges of using Virtual Reality (VR) in remote user studies. Highlighting the immersive nature of VR, the paper identifies key vulnerabilities, including varying technical proficiency, privacy concerns, ethical considerations, and data security risks. To address these issues, proposed mitigation strategies encompass comprehensive onboarding, prioritized informed consent, implementing privacy-by-design principles, and adherence to ethical guidelines. Secure data handling, including encryption and disposal protocols, is advocated. In conclusion, while remote VR studies present unique opportunities, carefully considering and implementing mitigation strategies is essential to uphold reliability, ethical integrity, and security, ensuring responsible and effective use of VR in user research. Ongoing efforts are crucial for adapting to the evolving landscape of VR technology in user studies.},
timestamp = {2024.04.19},
url = {https://www.unibw.de/usable-security-and-privacy/publikationen/pdf/paneva2024chiea.pdf},
}
J. Schwab, A. Nußbaum, A. Sergeeva, F. Alt, and V. Distler. What makes phishing simulation campaigns (un)acceptable? A vignette experiment on the acceptance and manipulation intention related to phishing simulation campaigns. Available at SSRN, 2024. doi:10.2139/ssrn.4737715
[BibTeX] [Abstract] [PDF]
Organizations depend on their employees’ long-term cooperation to protect themselves from threats. The acceptance of cybersecurity training measures is thus crucial. Phishing attacks are the point of entry for harmful follow-up attacks, and many organizations use simulated phishing campaigns to train employees to adopt secure behaviors. We conducted a pre-registered vignette experiment (N=793), investigating the factors that make a simulated phishing campaign seem (un)acceptable, and their influence on intention to manipulate the campaign. In an online experiment, we varied whether employees gave prior consent, whether the phishing email promised a financial incentive and the consequences for employees who clicked on the phishing link. We found that employees’ prior consent had a positive effect on the acceptance of a simulated phishing campaign. The consequences “employee interview” and “termination of the work contract” had a negative effect on acceptance. We found no statistically significant effects of consent, monetary incentive, and consequences on manipulation probability. Few participants described reasons for “manipulating” the campaign, mainly mentioning curiosity. Our results shed light on the factors influencing acceptance of simulated phishing campaigns and provide take-aways for future work in this space.
@Article{schwab2024ssrn,
author = {Schwab, Jasmin and Nußbaum, Alexander and Sergeeva, Anastasia and Alt, Florian and Distler, Verena},
journal = {Available at SSRN},
title = {What Makes Phishing Simulation Campaigns (Un)Acceptable? A Vignette Experiment on the Acceptance and Manipulation Intention Related to Phishing Simulation Campaigns},
year = {2024},
note = {schwab2024ssrn},
abstract = {Organizations depend on their employees' long-term cooperation to protect themselves from threats. The acceptance of cybersecurity training measures is thus crucial. Phishing attacks are the point of entry for harmful follow-up attacks, and many organizations use simulated phishing campaigns to train employees to adopt secure behaviors. We conducted a pre-registered vignette experiment (N=793), investigating the factors that make a simulated phishing campaign seem (un)acceptable, and their influence on intention to manipulate the campaign. In an online experiment, we varied whether employees gave prior consent, whether the phishing email promised a financial incentive and the consequences for employees who clicked on the phishing link. We found that employees' prior consent had a positive effect on the acceptance of a simulated phishing campaign. The consequences ``employee interview'' and ``termination of the work contract'' had a negative effect on acceptance. We found no statistically significant effects of consent, monetary incentive, and consequences on manipulation probability. Few participants described reasons for ``manipulating'' the campaign, mainly mentioning curiosity. Our results shed light on the factors influencing acceptance of simulated phishing campaigns and provide take-aways for future work in this space.},
doi = {10.2139/ssrn.4737715},
timestamp = {2024.03.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schwab2024ssrn.pdf},
}
Y. Abdrabou, M. Hassib, S. Hu, K. Pfeuffer, M. Khamis, A. Bulling, and F. Alt. EyeSeeIdentity: Exploring Natural Gaze Behaviour for Implicit User Identification during Photo Viewing. In Proceedings of the Usable Security Mini Conference 2024 (USEC’24), Internet Society, San Diego, CA, USA, 2024. doi:10.14722/usec.2024.23057
[BibTeX] [Abstract] [PDF]
Existing gaze-based methods for user identification either require special-purpose visual stimuli or artificial gaze behavior. Here, we explore how users can be differentiated by analyzing natural gaze behavior while freely looking at images. Our approach is based on the observation that looking at different images, for example, a picture from your last holiday, induces stronger emotional responses that are reflected in gaze behavior and, hence, are unique to the person having experienced that situation. We collected gaze data in a remote study (N = 39) where participants looked at three image categories: personal images, other people’s images, and random images from the Internet. We demonstrate the potential of identifying different people using machine learning with an accuracy of 85%. The results pave the way for a new class of authentication methods solely based on natural human gaze behavior.
@InProceedings{abdrabou2024usec,
author = {Yasmeen Abdrabou AND Mariam Hassib AND Shuquin Hu AND Ken Pfeuffer AND Mohamed Khamis AND Andreas Bulling AND Florian Alt},
booktitle = {{Proceedings of the Usable Security Mini Conference 2024}},
title = {{EyeSeeIdentity: Exploring Natural Gaze Behaviour for Implicit User Identification during Photo Viewing}},
year = {2024},
address = {San Diego, CA, USA},
note = {abdrabou2024usec},
publisher = {Internet Society},
series = {USEC'24},
abstract = {Existing gaze-based methods for user identification either require special-purpose visual stimuli or artificial gaze behavior. Here, we explore how users can be differentiated by analyzing natural gaze behavior while freely looking at images. Our approach is based on the observation that looking at different images, for example, a picture from your last holiday, induces stronger emotional responses that are reflected in gaze behavior and, hence, are unique to the person having experienced that situation. We collected gaze data in a remote study ($N=39$) where participants looked at three image categories: personal images, other people's images, and random images from the Internet. We demonstrate the potential of identifying different people using machine learning with an accuracy of 85\%. The results pave the way for a new class of authentication methods solely based on natural human gaze behavior.},
doi = {10.14722/usec.2024.23057},
isbn = {979-8-9894372-5-2},
owner = {florian},
timestamp = {2024.02.26},
url = {http://www.florian-alt.org/unibw/wp-content/publications/abdrabou2024usec.pdf},
}
S. Delgado Rodriguez, S. Prange, L. Mecke, and F. Alt. Act2Auth – a novel authentication concept based on embedded tangible interaction at desks. In Proceedings of the Eighteenth International Conference on Tangible, Embedded, and Embodied Interaction (TEI ’24), Association for Computing Machinery, New York, NY, USA, 2024. doi:10.1145/3623509.3633360
[BibTeX] [Abstract] [PDF]
Authentication (e.g., entering a password) is frequently perceived as an annoying obstacle when interacting with computational devices, but still essential to protect sensitive data from unauthorized access. We present Act2Auth, a novel concept for embedding authentication into users’ established routines by sensing tangible interactions at desks. With Act2Auth, users can authenticate by performing (secret) routines, such as putting a cup on their desk, rearranging their keyboard, and touching their mouse. The Act2Auth concept is informed by (1) an object analysis of 107 desk photos from Reddit, (2) an online survey (N = 65) investigating users’ strategies for creating touch-based authentication secrets, and (3) a technical exploration of capacitive touch-sensing at desks. We then (4) implemented a prototype and evaluated the usability as well as the memorability of Act2Auth compared to textual passwords (N = 8). With Act2Auth, we provide fundamental work on how to embed authentication tasks into our daily tangible interactions.
@InProceedings{delgado2024tei,
author = {Delgado Rodriguez, Sarah and Prange, Sarah and Mecke, Lukas and Alt, Florian},
booktitle = {Proceedings of the Eighteenth International Conference on Tangible, Embedded, and Embodied Interaction},
title = {Act2Auth – A Novel Authentication Concept based on Embedded Tangible Interaction at Desks},
year = {2024},
address = {New York, NY, USA},
note = {delgado2024tei},
publisher = {Association for Computing Machinery},
series = {TEI '24},
abstract = {Authentication (e.g., entering a password) is frequently perceived as an annoying obstacle when interacting with computational devices, but still essential to protect sensitive data from unauthorized access. We present Act2Auth, a novel concept for embedding authentication into users’ established routines by sensing tangible interactions at desks. With Act2Auth, users can authenticate by performing (secret) routines, such as putting a cup on their desk, rearranging their keyboard, and touching their mouse. The Act2Auth concept is informed by (1) an object analysis of 107 desk photos from Reddit, (2) an online survey (N = 65) investigating users’ strategies for creating touch-based authentication secrets, and (3) a technical exploration of capacitive touch-sensing at desks. We then (4) implemented a prototype and evaluated the usability as well as the memorability of Act2Auth compared to textual passwords (N = 8). With Act2Auth, we provide fundamental work on how to embed authentication tasks into our daily tangible interactions.},
articleno = {12},
doi = {10.1145/3623509.3633360},
isbn = {9798400704024},
keywords = {capacitive sensing, embedded authentication, tangible authentication, tangible security},
location = {Cork, Ireland},
numpages = {15},
timestamp = {2024.02.07},
url = {http://www.florian-alt.org/unibw/wp-content/publications/delgado2024tei.pdf},
}
Y. Abdelrahman, F. Alt, T. Dingler, C. Hadnagy, A. Maroño, and V. Distler. Defining and fortifying against cognitive vulnerabilities in social engineering (Dagstuhl Seminar 23462). Schloss Dagstuhl – Leibniz-Zentrum für Informatik, 2024.
[BibTeX] [Abstract]
Social engineering has become the main vector for human-centered cyber attacks, resulting from an unparalleled level of professionalization in the cybercrime industry over the past years. Hereby, through manipulation, criminals seek to make victims take actions that compromise security, such as revealing credentials, issuing payments, or disclosing confidential information. Little effective means for protection exist today against such attacks beyond raising awareness through education. At the same time, the proliferation of sensors in our everyday lives – both in personal devices and in our (smart) environments – provides an unprecedented opportunity for developing solutions assessing the cognitive vulnerabilities of users and serves as a basis for novel means of protection. This report documents the program and the outcomes of the Dagstuhl Seminar 23462 “Defining and Fortifying Against Cognitive Vulnerabilities in Social Engineering”. This 3-day seminar brought together experts from academia, industry, and the authorities working on social engineering. During the seminar, participants developed a common understanding of social engineering, identified grand challenges, worked on a research agenda, and identified ideas for collaborations in the form of research projects and joint initiatives.
@Article{abdelrahman2024defining,
author = {Abdelrahman, Yomna and Alt, Florian and Dingler, Tilman and Hadnagy, Christopher and Maro{\~n}o, Abbie and Distler, Verena},
title = {Defining and Fortifying Against Cognitive Vulnerabilities in Social Engineering (Dagstuhl Seminar 23462)},
year = {2024},
note = {abdelrahman2024defining},
abstract = {Social engineering has become the main vector for human-centered cyber attacks, resulting from an unparalleled level of professionalization in the cybercrime industry over the past years. Hereby, through manipulation, criminals seek to make victims take actions that compromise security, such as revealing credentials, issuing payments, or disclosing confidential information. Little effective means for protection exist today against such attacks beyond raising awareness through education. At the same time, the proliferation of sensors in our everyday lives – both in personal devices and in our (smart) environments – provides an unprecedented opportunity for developing solutions assessing the cognitive vulnerabilities of users and serves as a basis for novel means of protection. This report documents the program and the outcomes of the Dagstuhl Seminar 23462 “Defining and Fortifying Against Cognitive Vulnerabilities in Social Engineering”. This 3-day seminar brought together experts from academia, industry, and the authorities working on social engineering. During the seminar, participants developed a common understanding of social engineering, identified grand challenges, worked on a research agenda, and identified ideas for collaborations in the form of research projects and joint initiatives.},
publisher = {Schloss Dagstuhl--Leibniz-Zentrum f{\"u}r Informatik},
timestamp = {2023.11.20},
}
Y. Abdelrahman, F. Alt, T. Dingler, C. Hadnagy, A. Maroño, and V. Distler. Defining and Fortifying Against Cognitive Vulnerabilities in Social Engineering (Dagstuhl Seminar 23462). Dagstuhl Reports, vol. 13, iss. 11, p. 103–129, 2024. doi:10.4230/DagRep.13.11.103
[BibTeX] [PDF]
@Article{abdelrahman2024dagstuhl,
author = {Abdelrahman, Yomna and Alt, Florian and Dingler, Tilman and Hadnagy, Christopher and Maro\~{n}o, Abbie and Distler, Verena},
journal = {Dagstuhl Reports},
title = {{Defining and Fortifying Against Cognitive Vulnerabilities in Social Engineering (Dagstuhl Seminar 23462)}},
year = {2024},
issn = {2192-5283},
note = {abdelrahman2024dagstuhl},
number = {11},
pages = {103--129},
volume = {13},
address = {Dagstuhl, Germany},
annote = {Keywords: Social Engineering, Cognitive Vulnerabilities, Phishing, Vishing},
doi = {10.4230/DagRep.13.11.103},
editor = {Abdelrahman, Yomna and Alt, Florian and Dingler, Tilman and Hadnagy, Christopher and Maro\~{n}o, Abbie and Distler, Verena},
publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik},
url = {https://drops.dagstuhl.de/entities/document/10.4230/DagRep.13.11.103},
urn = {urn:nbn:de:0030-drops-198461},
}

2023

F. Alt, M. Hassib, and V. Distler. Human-centered behavioral and physiological security. In Proceedings of the 2023 Workshop on New Security Paradigms (NSPW ’23), Association for Computing Machinery, New York, NY, USA, 2023. doi:10.1145/3633500.3633504
[BibTeX] [Abstract] [PDF]
We propose a paradigm shift in human-centered security research in which users’ objective behavior and physiological states move into focus. This proposal is motivated by the fact that many personal and wearable devices today come with capabilities that allow researchers to assess users’ behavior and physiology in real-time. We expect substantial advances due to the ability to develop more targeted approaches to human-centered security in which solutions are targeted at individuals’ literacy, skills, and context. To this end, the main contribution of this work is a research space: we first provide an overview of common human-centered attacks that could be better understood and addressed through our approach. Based on this overview, we then showcase how specific security habits can benefit from the knowledge of users’ current state. Our work is complemented by a discussion of the implications and research directions enabled through this novel paradigm.
@InProceedings{alt2023nspw,
author = {Alt, Florian AND Hassib, Mariam AND Distler, Verena},
booktitle = {Proceedings of the 2023 Workshop on New Security Paradigms},
title = {Human-centered Behavioral and Physiological Security},
year = {2023},
address = {New York, NY, USA},
note = {alt2023nspw},
publisher = {Association for Computing Machinery},
series = {NSPW '23},
abstract = {We propose a paradigm shift in human-centered security research in which users’ objective behavior and physiological states move into focus. This proposal is motivated by the fact that many personal and wearable devices today come with capabilities that allow researchers to assess users’ behavior and physiology in real-time. We expect substantial advances due to the ability to develop more targeted approaches to human-centered security in which solutions are targeted at individuals’ literacy, skills, and context. To this end, the main contribution of this work is a research space: we first provide an overview of common human-centered attacks that could be better understood and addressed through our approach. Based on this overview, we then showcase how specific security habits can benefit from the knowledge of users’ current state. Our work is complemented by a discussion of the implications and research directions enabled through this novel paradigm.},
doi = {10.1145/3633500.3633504},
isbn = {979-8-4007-1620-1/23/09},
location = {Segovia, Spain},
timestamp = {2023.12.26},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2023nspw.pdf},
}
M. Teuschel, D. Pöhn, M. Grabatin, F. Dietz, W. Hommel, and F. Alt. ’Don’t annoy me with privacy decisions!’ – designing privacy-preserving user interfaces for SSI wallets on smartphones. IEEE Access, vol. 11, pp. 131814-131835, 2023. doi:10.1109/ACCESS.2023.3334908
[BibTeX] [Abstract] [PDF]
Persistent digital identities allow individuals to prove who they are across the Internet. For decades, individuals have relied on large identity providers (e. g., Google and Facebook). In recent years, the advent of so-called self-sovereign identities (SSI) has increasingly been approved by national governments. This decentralized approach provides users with a way to maintain control over the information associated with their identities. Yet, the design of these wallets to enable users to act in a privacy-preserving manner when sharing data with requesting services remains an open question. Based on a qualitative pre-study, we chart the design space for privacy-preserving user interfaces for SSI wallets and explore several designs to understand user adoption and decision-making processes. A qualitative user study (N=16) based on realistic scenarios revealed that while the proposed designs generally increase privacy awareness, participants trade data for convenience. Our study is complemented by guidelines for designers of future user interfaces for smartphone SSI wallets.
@Article{teuschel2023access,
author = {Teuschel, Moritz and Pöhn, Daniela and Grabatin, Michael and Dietz, Felix and Hommel, Wolfgang and Alt, Florian},
journal = {IEEE Access},
title = {’Don’t Annoy Me With Privacy Decisions!’ – Designing Privacy-Preserving User Interfaces for SSI Wallets on Smartphones},
year = {2023},
note = {teuschel2023access},
pages = {131814-131835},
volume = {11},
abstract = {Persistent digital identities allow individuals to prove who they are across the Internet. For decades, individuals have relied on large identity providers (e. g., Google and Facebook). In recent years, the advent of so-called self-sovereign identities (SSI) has increasingly been approved by national governments. This decentralized approach provides users with a way to maintain control over the information associated with their identities. Yet, the design of these wallets to enable users to act in a privacy-preserving manner when sharing data with requesting services remains an open question. Based on a qualitative pre-study, we chart the design space for privacy-preserving user interfaces for SSI wallets and explore several designs to understand user adoption and decision-making processes. A qualitative user study (N=16) based on realistic scenarios revealed that while the proposed designs generally increase privacy awareness, participants trade data for convenience. Our study is complemented by guidelines for designers of future user interfaces for smartphone SSI wallets.},
address = {New York, NY, USA},
doi = {10.1109/ACCESS.2023.3334908},
publisher = {IEEE},
timestamp = {2023.12.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/teuschel2023access.pdf},
}
S. Delgado Rodriguez, A. Dao Phuong, F. Bumiller, L. Mecke, F. Dietz, F. Alt, and M. Hassib. Padlock, the universal security symbol? – exploring symbols and metaphors for privacy and security. In Proceedings of the 22nd International Conference on Mobile and Ubiquitous Multimedia (MUM ’23), Association for Computing Machinery, New York, NY, USA, 2023, p. 10–24. doi:10.1145/3626705.3627770
[BibTeX] [Abstract] [PDF]
The use of symbols and metaphors can be a fast and effective way of conveying abstract concepts. At the same time, misconceived symbols can lead to misunderstandings and errors. Therefore, when it comes to privacy and security, clear communication is essential to avoid putting users’ personal data at risk. In this paper, we elicit 32 symbols and metaphors associated with privacy and security through two brainstorming sessions (n = 8, each). Six experts further separated this collection into security and privacy-related symbols and generated clusters based on similarity. Using participants’ clusters, we derived underlying themes. As a result, we present a symbol and metaphor space for privacy and security and discuss their perceived meaning. Our findings can serve researchers, designers, and developers to find suitable symbols or metaphors for a given scenario (e.g., to decide on the interaction metaphor for a tangible security mechanism) and to understand if a symbol is ambiguous or how it may be understood (e.g., is an eye associated with privacy configurations?). Our work provides an initial knowledge base supporting effective communication in this field.
@InProceedings{delgado2023mum,
author = {Delgado Rodriguez, Sarah and Dao Phuong, Anh and Bumiller, Franziska and Mecke, Lukas and Dietz, Felix and Alt, Florian and Hassib, Mariam},
booktitle = {Proceedings of the 22nd International Conference on Mobile and Ubiquitous Multimedia},
title = {Padlock, the Universal Security Symbol? - Exploring Symbols and Metaphors for Privacy and Security},
year = {2023},
address = {New York, NY, USA},
note = {delgado2023mum},
pages = {10–24},
publisher = {Association for Computing Machinery},
series = {MUM '23},
abstract = {The use of symbols and metaphors can be a fast and effective way of conveying abstract concepts. At the same time, misconceived symbols can lead to misunderstandings and errors. Therefore, when it comes to privacy and security, clear communication is essential to avoid putting users’ personal data at risk. In this paper, we elicit 32 symbols and metaphors associated with privacy and security through two brainstorming sessions (n = 8, each). Six experts further separated this collection into security and privacy-related symbols and generated clusters based on similarity. Using participants’ clusters, we derived underlying themes. As a result, we present a symbol and metaphor space for privacy and security and discuss their perceived meaning. Our findings can serve researchers, designers, and developers to find suitable symbols or metaphors for a given scenario (e.g., to decide on the interaction metaphor for a tangible security mechanism) and to understand if a symbol is ambiguous or how it may be understood (e.g., is an eye associated with privacy configurations?). Our work provides an initial knowledge base supporting effective communication in this field.},
doi = {10.1145/3626705.3627770},
isbn = {9798400709210},
keywords = {metaphors, privacy, security, symbols},
location = {Vienna, Austria},
numpages = {15},
timestamp = {2023.12.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/delgado2022mum.pdf},
}
V. Voigt, R. Wiethe, C. Sassmann, M. Will, S. D. Rodriguez, and F. Alt. Safe Call: a tangible smartphone interface that supports safe and easy phone calls and contacts management for older people. In Proceedings of the 22nd International Conference on Mobile and Ubiquitous Multimedia (MUM ’23), Association for Computing Machinery, New York, NY, USA, 2023, p. 562–564. doi:10.1145/3626705.3631878
[BibTeX] [Abstract] [PDF]
Using a smartphone can be challenging for older people. Basic tasks like managing contacts and making phone calls might be intimidating. Moreover, the rise of scam calls increases the potential dangers of using a smartphone. To address these issues, we introduce Safe Call, a tangible smartphone interface that involves physical contact cards with NFC tags and an accompanying app. The contact cards simplify initiating calls, while the app enhances safety by distinguishing known and unknown callers. Safe Call supports digital accessibility for older people, encourages smartphone adoption, and reduces vulnerability to scams.
@InProceedings{voigt2023mumadj,
author = {Voigt, Vanessa and Wiethe, Raffael and Sassmann, Chanakarn and Will, Moritz and Rodriguez, Sarah Delgado and Alt, Florian},
booktitle = {Proceedings of the 22nd International Conference on Mobile and Ubiquitous Multimedia},
title = {Safe Call: A Tangible Smartphone Interface That Supports Safe and Easy Phone Calls and Contacts Management for Older People},
year = {2023},
address = {New York, NY, USA},
note = {voigt2023mumadj},
pages = {562–564},
publisher = {Association for Computing Machinery},
series = {MUM '23},
abstract = {Using a smartphone can be challenging for older people. Basic tasks like managing contacts and making phone calls might be intimidating. Moreover, the rise of scam calls increases the potential dangers of using a smartphone. To address these issues, we introduce Safe Call, a tangible smartphone interface that involves physical contact cards with NFC tags and an accompanying app. The contact cards simplify initiating calls, while the app enhances safety by distinguishing known and unknown callers. Safe Call supports digital accessibility for older people, encourages smartphone adoption, and reduces vulnerability to scams.},
doi = {10.1145/3626705.3631878},
isbn = {9798400709210},
keywords = {contact management, older, scam prevention, tangible security},
location = {Vienna, Austria},
numpages = {3},
timestamp = {2023.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/voigt2023mumadj.pdf},
}
K. Pfeuffer, J. Obernolte, F. Dietz, V. Mäkelä, L. Sidenmark, P. Manakhov, M. Pakanen, and F. Alt. PalmGazer: unimanual eye-hand menus in augmented reality. In Proceedings of the 2023 ACM Symposium on Spatial User Interaction (SUI ’23), Association for Computing Machinery, New York, NY, USA, 2023. doi:10.1145/3607822.3614523
[BibTeX] [Abstract] [PDF]
How can we design the user interfaces for augmented reality (AR) so that we can interact as simple, flexible and expressive as we can with smartphones in one hand? To explore this question, we propose PalmGazer as an interaction concept integrating eye-hand interaction to establish a singlehandedly operable menu system. In particular, PalmGazer is designed to support quick and spontaneous digital commands– such as to play a music track, check notifications or browse visual media – through our devised three-way interaction model: hand opening to summon the menu UI, eye-hand input for selection of items, and dragging gesture for navigation. A key aspect is that it remains always-accessible and movable to the user, as the menu supports meaningful hand and head based reference frames. We demonstrate the concept in practice through a prototypical mobile UI with application probes, and describe technique designs specifically-tailored to the application UI. A qualitative evaluation highlights the system’s interaction benefits and drawbacks, e.g., that common 2D scroll and selection tasks are simple to operate, but higher degrees of freedom may be reserved for two hands. Our work contributes interaction techniques and design insights to expand AR’s uni-manual capabilities.
@InProceedings{pfeuffer2023sui,
author = {Pfeuffer, Ken and Obernolte, Jan and Dietz, Felix and M\"{a}kel\"{a}, Ville and Sidenmark, Ludwig and Manakhov, Pavel and Pakanen, Minna and Alt, Florian},
booktitle = {Proceedings of the 2023 ACM Symposium on Spatial User Interaction},
title = {PalmGazer: Unimanual Eye-Hand Menus in Augmented Reality},
year = {2023},
address = {New York, NY, USA},
note = {pfeuffer2023sui},
publisher = {Association for Computing Machinery},
series = {SUI '23},
abstract = {How can we design the user interfaces for augmented reality (AR) so that we can interact as simply, flexibly and expressively as we can with smartphones in one hand? To explore this question, we propose PalmGazer as an interaction concept integrating eye-hand interaction to establish a singlehandedly operable menu system. In particular, PalmGazer is designed to support quick and spontaneous digital commands – such as to play a music track, check notifications or browse visual media – through our devised three-way interaction model: hand opening to summon the menu UI, eye-hand input for selection of items, and dragging gesture for navigation. A key aspect is that it remains always-accessible and movable to the user, as the menu supports meaningful hand and head based reference frames. We demonstrate the concept in practice through a prototypical mobile UI with application probes, and describe technique designs specifically tailored to the application UI. A qualitative evaluation highlights the system’s interaction benefits and drawbacks, e.g., that common 2D scroll and selection tasks are simple to operate, but higher degrees of freedom may be reserved for two hands. Our work contributes interaction techniques and design insights to expand AR’s uni-manual capabilities.},
articleno = {10},
doi = {10.1145/3607822.3614523},
isbn = {9798400702815},
keywords = {menu, augmented reality, gestures, eye-hand interaction, gaze},
location = {Sydney, NSW, Australia},
numpages = {12},
timestamp = {2023.10.08},
url = {http://www.florian-alt.org/unibw/wp-content/publications/pfeuffer2023sui.pdf},
}
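The three-way interaction model described in the abstract lends itself to a small state machine. The sketch below is a loose illustration of that model (the event names and the pinch-for-selection mapping are assumptions, not the paper's exact technique):

```python
# A tiny state machine illustrating PalmGazer's three-way interaction model:
# an opening hand summons the menu, combined eye-hand input selects an item,
# and a drag gesture navigates. Event names here are illustrative only.

class PalmMenu:
    def __init__(self, items):
        self.items = items
        self.visible = False
        self.page = 0

    def handle(self, event, gazed_item=None):
        if event == "hand_open":
            self.visible = True                   # summon menu on the palm
        elif event == "hand_close":
            self.visible = False                  # dismiss menu
        elif self.visible and event == "pinch" and gazed_item is not None:
            return self.items[gazed_item]         # eye-hand selection
        elif self.visible and event == "drag":
            self.page += 1                        # gesture-based navigation
        return None

menu = PalmMenu(["play music", "notifications", "photos"])
menu.handle("hand_open")
print(menu.handle("pinch", gazed_item=0))  # -> play music
```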
R. Radiah, P. Prodan, V. Mäkelä, P. Knierim, and F. Alt. How Are Your Participants Feeling Today? Accounting For and Assessing Emotions in Virtual Reality. In Proceedings of mensch und computer 2023 (MuC ’23), Association for Computing Machinery, New York, NY, USA, 2023, p. 37–48. doi:10.1145/3603555.3603577
[BibTeX] [Abstract] [PDF]
Emotions affect our perception, attention, and behavior. Hereby, the emotional state is greatly affected by the surrounding environment that can seamlessly be designed in Virtual Reality (VR). However, research typically does not account for the influence of the environment on participants’ emotions, even if this influence might alter acquired data. To mitigate the impact, we formulated a design space that explains how the creation of virtual environments influences emotions. Furthermore, we present EmotionEditor, a toolbox that assists researchers in rapidly developing virtual environments that influence and assess the users’ emotional state. We evaluated the capability of EmotionEditor to elicit emotions in a lab study (n=30). Based on interviews with VR experts (n=13), we investigate how they consider the effect of emotions in their research, how the EmotionEditor can prospectively support them, and analyze prevalent challenges in the design as well as development of VR user studies.
@InProceedings{rivu2023muc,
author = {Radiah, Rivu and Prodan, Pia and M\"{a}kel\"{a}, Ville and Knierim, Pascal and Alt, Florian},
booktitle = {Proceedings of Mensch Und Computer 2023},
title = {{How Are Your Participants Feeling Today? Accounting For and Assessing Emotions in Virtual Reality}},
year = {2023},
address = {New York, NY, USA},
note = {rivu2023muc},
pages = {37–48},
publisher = {Association for Computing Machinery},
series = {MuC '23},
abstract = {Emotions affect our perception, attention, and behavior. Hereby, the emotional state is greatly affected by the surrounding environment that can seamlessly be designed in Virtual Reality (VR). However, research typically does not account for the influence of the environment on participants’ emotions, even if this influence might alter acquired data. To mitigate the impact, we formulated a design space that explains how the creation of virtual environments influences emotions. Furthermore, we present EmotionEditor, a toolbox that assists researchers in rapidly developing virtual environments that influence and assess the users’ emotional state. We evaluated the capability of EmotionEditor to elicit emotions in a lab study (n=30). Based on interviews with VR experts (n=13), we investigate how they consider the effect of emotions in their research, how the EmotionEditor can prospectively support them, and analyze prevalent challenges in the design as well as development of VR user studies.},
doi = {10.1145/3603555.3603577},
isbn = {9798400707711},
keywords = {virtual reality, virtual environments, user studies, emotions},
location = {Rapperswil, Switzerland},
numpages = {12},
timestamp = {2023.09.06},
url = {http://www.florian-alt.org/unibw/wp-content/publications/rivu2023muc.pdf},
}
Y. Abdrabou, L. Mecke, R. Rivu, S. Prange, Q. D. Nguyen, V. Voigt, F. Alt, and K. Pfeuffer. How Unique do we Move? Understanding the Human Body and Context Factors for User Identification. In Proceedings of mensch und computer 2023 (MuC ’23), Association for Computing Machinery, New York, NY, USA, 2023, p. 127–137. doi:10.1145/3603555.3603574
[BibTeX] [Abstract] [PDF]
Past work showed great promise in biometric user identification and authentication through exploiting specific features of specific body parts. We investigate human motion across the whole body, to explore what parts of the body exhibit more unique movement patterns, and are more suitable to identify users in general. We collect and analyze full-body motion data across various activities (e.g., sitting, standing), handheld objects (uni- or bimanual), and tasks (e.g., watching TV or walking). Our analysis shows, e.g., that gait as a strong feature amplifies when carrying items, game activity elicits more unique behaviors than texting on a smartphone, and motion features are robust across body parts whereas posture features are more robust across tasks. Our work provides a holistic reference on how context affects human motion to identify us across a variety of factors, useful to inform researchers and practitioners of behavioral biometric systems on a large scale.
@InProceedings{abdrabou2023muc,
author = {Abdrabou, Yasmeen and Mecke, Lukas and Rivu, Radiah and Prange, Sarah and Nguyen, Quy Dat and Voigt, Vanessa and Alt, Florian and Pfeuffer, Ken},
booktitle = {Proceedings of Mensch Und Computer 2023},
title = {{How Unique do we Move? Understanding the Human Body and Context Factors for User Identification}},
year = {2023},
address = {New York, NY, USA},
note = {abdrabou2023muc},
pages = {127–137},
publisher = {Association for Computing Machinery},
series = {MuC '23},
abstract = {Past work showed great promise in biometric user identification and authentication through exploiting specific features of specific body parts. We investigate human motion across the whole body, to explore what parts of the body exhibit more unique movement patterns, and are more suitable to identify users in general. We collect and analyze full-body motion data across various activities (e.g., sitting, standing), handheld objects (uni- or bimanual), and tasks (e.g., watching TV or walking). Our analysis shows, e.g., that gait as a strong feature amplifies when carrying items, game activity elicits more unique behaviors than texting on a smartphone, and motion features are robust across body parts whereas posture features are more robust across tasks. Our work provides a holistic reference on how context affects human motion to identify us across a variety of factors, useful to inform researchers and practitioners of behavioral biometric systems on a large scale.},
doi = {10.1145/3603555.3603574},
isbn = {9798400707711},
keywords = {Authentication, Full-body Motion, User Identification, Context},
location = {Rapperswil, Switzerland},
numpages = {11},
timestamp = {2023.09.06},
url = {http://www.florian-alt.org/unibw/wp-content/publications/abdrabou2023muc.pdf},
}
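To make the idea of motion-based user identification concrete, here is an illustrative pipeline on synthetic data: simple velocity and posture statistics per motion window, fed to an off-the-shelf classifier. The feature choice and data are stand-ins, not the study's actual pipeline.

```python
# Illustrative sketch of identifying users from body-motion features, in the
# spirit of the paper (not the authors' pipeline). Each sample is assumed to
# be a window of 3D joint positions; the data here is synthetic.

import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.default_rng(0)

def motion_features(window):
    """window: (frames, joints, 3) positions -> simple per-joint statistics."""
    velocity = np.diff(window, axis=0)
    return np.concatenate([
        velocity.mean(axis=(0, 2)),   # mean per-joint velocity
        velocity.std(axis=(0, 2)),    # movement variability
        window.mean(axis=(0, 2)),     # average posture
    ])

# Synthetic stand-in for recorded sessions: 5 users x 20 windows each.
X = np.stack([motion_features(rng.normal(user, 1.0, size=(60, 15, 3)))
              for user in range(5) for _ in range(20)])
y = np.repeat(np.arange(5), 20)

clf = RandomForestClassifier(n_estimators=100, random_state=0).fit(X, y)
print(clf.predict(X[:3]))  # which user produced the first windows?
```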
O. Hein, P. Rauschnabel, M. Hassib, and F. Alt. Sick in the Car, Sick in VR?: Understanding how Real-World Susceptibility to Dizziness, Nausea and Eye Strain Influences VR Motion Sickness. In Proceedings of the 19th IFIP TC 13 International Conference on Human-Computer Interaction (INTERACT ’23), Springer Nature, Cham, Switzerland, 2023.
[BibTeX] [Abstract] [PDF]
A substantial number of Virtual Reality (VR) users (studies report 30-80%) suffer from cyber sickness, a negative experience caused by a sensory mismatch of real and virtual stimuli. Prior research proposed different mitigation strategies. Yet, it remains unclear how effectively they work, considering users’ real-world susceptibility to motion sickness. We present a lab experiment, in which we assessed 146 users’ real-world susceptibility to nausea, dizziness and eye strain before exposing them to a roller coaster ride with low or high visual resolution. We found that nausea is significantly lower for higher resolution but real-world motion susceptibility has a much stronger effect on dizziness, nausea, and eye strain. Our work points towards a need for research investigating the effectiveness of approaches to mitigate motion sickness so as not to exclude them from VR use and access to the metaverse.
@InProceedings{hein2023interact,
author = {Oliver Hein AND Philipp Rauschnabel AND Mariam Hassib AND Florian Alt},
booktitle = {{Proceedings of the 19th IFIP TC 13 International Conference on Human-Computer Interaction}},
title = {{Sick in the Car, Sick in VR?: Understanding how Real-World Susceptibility to Dizziness, Nausea and Eye Strain Influences VR Motion Sickness}},
year = {2023},
address = {Cham, Switzerland},
editor = {Abdelnour Nocera, Jos{\'e} and Krist{\'i}n L{\'a}rusd{\'o}ttir, Marta and Petrie, Helen and Piccinno, Antonio and Winckler, Marco},
month = {4},
note = {hein2023interact},
publisher = {Springer Nature},
series = {INTERACT '23},
abstract = {A substantial number of Virtual Reality (VR) users (studies report 30-80%) suffer from cyber sickness, a negative experience caused by a sensory mismatch of real and virtual stimuli. Prior research proposed different mitigation strategies. Yet, it remains unclear how effectively they work, considering users’ real-world susceptibility to motion sickness. We present a lab experiment, in which we assessed 146 users’ real-world susceptibility to nausea, dizziness and eye strain before exposing them to a roller coaster ride with low or high visual resolution. We found that nausea is significantly lower for higher resolution but real-world motion susceptibility has a much stronger effect on dizziness, nausea, and eye strain. Our work points towards a need for research investigating the effectiveness of approaches to mitigate motion sickness so as not to exclude them from VR use and access to the metaverse.},
isbn = {978-3-031-42283-6},
language = {English},
location = {York, United Kingdom},
owner = {florian},
timestamp = {2023.09.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/hein2023interact.pdf},
}
Y. Abdrabou, M. Asbeck, K. Pfeuffer, Y. Abdelrahman, M. Hassib, and F. Alt. Empowering Users: Leveraging Interface Cues to Enhance Password Security. In Proceedings of the 19th IFIP TC 13 International Conference on Human-Computer Interaction (INTERACT ’23), Springer, Berlin-Heidelberg, Germany, 2023.
[BibTeX] [Abstract] [PDF]
Passwords are a popular means of authentication for online accounts, but users struggle to come up with and remember numerous passwords, resorting to insecure coping strategies. Previous research on graphical authentication schemes demonstrated that modifying the authentication interface can encourage more secure passwords. In this study (N=59), we explored the use of implicit (website background and advertisements) and explicit (word suggestions) cues to influence password composition. We found that 60.59% of passwords were influenced by the interface cues. This highlights the potential for future authentication interface design to nudge users towards creating more secure passwords. Our work discusses how designers can use these findings to improve authentication interfaces for better password security.
@InProceedings{abdrabou2023interact,
author = {Yasmeen Abdrabou AND Marco Asbeck AND Ken Pfeuffer AND Yomna Abdelrahman AND Mariam Hassib AND Florian Alt},
booktitle = {{Proceedings of the 19th IFIP TC 13 International Conference on Human-Computer Interaction}},
title = {{Empowering Users: Leveraging Interface Cues to Enhance Password Security}},
year = {2023},
address = {Berlin-Heidelberg, Germany},
month = {4},
note = {abdrabou2023interact},
publisher = {Springer},
series = {INTERACT '23},
abstract = {Passwords are a popular means of authentication for online accounts, but users struggle to come up with and remember numerous passwords, resorting to insecure coping strategies. Previous research on graphical authentication schemes demonstrated that modifying the authentication interface can encourage more secure passwords. In this study (N=59), we explored the use of implicit (website background and advertisements) and explicit (word suggestions) cues to influence password composition. We found that 60.59% of passwords were influenced by the interface cues. This highlights the potential for future authentication interface design to nudge users towards creating more secure passwords. Our work discusses how designers can use these findings to improve authentication interfaces for better password security.},
day = {1},
language = {English},
location = {York, United Kingdom},
owner = {florian},
timestamp = {2023.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/abdrabou2023interact.pdf},
}
S. Prange and F. Alt, “Increasing users’ privacy awareness in the internet of things: design space and sample scenarios,” in Human factors in privacy research, N. Gerber, A. Stöver, and K. Marky, Eds., Cham: Springer International Publishing, 2023, p. 321–336. doi:10.1007/978-3-031-28643-8_16
[BibTeX] [Abstract] [PDF]
An increasing number of devices and sensors in the environments we access daily are capable of collecting personal data about us. Surveillance cameras in public spaces, smart speakers in friends’ living rooms, or smartphones carried by individuals are just a few examples. At the same time, many users are unaware of sensors being in place, in particular, those deployed in unfamiliar environments. Hence, it becomes increasingly challenging for users to keep control over their personal data being tracked and/or processed. Crucially, for users to be able to make informed decisions and privacy choices, they first of all need to be aware of potential privacy intrusions in their surroundings. In this chapter, we address this by exploring means to increase users’ privacy awareness in the Internet of Things. In particular, we illustrate the design space for such privacy awareness mechanisms, including what information should be displayed, and how this information can be made accessible for various target groups such as (to-be) device owners or passers-by. We also introduce and compare three sample scenarios in which privacy awareness mechanisms can support users: (1) privacy-relevant information for purchase decisions, (2) on-demand privacy-relevant information for active device search, and (3) in situ privacy-relevant information and guidance. The chapter is complemented by a discussion on future approaches to raising privacy awareness.
@InBook{prange2023springer,
author = {Prange, Sarah and Alt, Florian},
editor = {Gerber, Nina and St{\"o}ver, Alina and Marky, Karola},
pages = {321--336},
publisher = {Springer International Publishing},
title = {Increasing Users' Privacy Awareness in the Internet of Things: Design Space and Sample Scenarios},
year = {2023},
address = {Cham},
isbn = {978-3-031-28643-8},
note = {prange2023springer},
abstract = {An increasing number of devices and sensors in the environments we access daily are capable of collecting personal data about us. Surveillance cameras in public spaces, smart speakers in friends' living rooms, or smartphones carried by individuals are just a few examples. At the same time, many users are unaware of sensors being in place, in particular, those deployed in unfamiliar environments. Hence, it becomes increasingly challenging for users to keep control over their personal data being tracked and/or processed. Crucially, for users to be able to make informed decisions and privacy choices, they first of all need to be aware of potential privacy intrusions in their surroundings. In this chapter, we address this by exploring means to increase users' privacy awareness in the Internet of Things. In particular, we illustrate the design space for such privacy awareness mechanisms, including what information should be displayed, and how this information can be made accessible for various target groups such as (to-be) device owners or passers-by. We also introduce and compare three sample scenarios in which privacy awareness mechanisms can support users: (1) privacy-relevant information for purchase decisions, (2) on-demand privacy-relevant information for active device search, and (3) in situ privacy-relevant information and guidance. The chapter is complemented by a discussion on future approaches to raising privacy awareness.},
booktitle = {Human Factors in Privacy Research},
doi = {10.1007/978-3-031-28643-8_16},
timestamp = {2023.08.19},
url = {http://www.florian-alt.org/unibw/wp-content/publications/prange2023springer.pdf},
}
Y. Hu Fleischhauer, H. B. Surale, F. Alt, and K. Pfeuffer. Gaze-based mode-switching to enhance interaction with menus on tablets. In Proceedings of the 2023 ACM Symposium on Eye Tracking Research & Applications (ETRA ’23), Association for Computing Machinery, New York, NY, USA, 2023. doi:10.1145/3588015.3588409
[BibTeX] [Abstract] [PDF]
In design work, a common task is the interaction with menus to change the drawing mode. Done frequently, this can become a tedious and fatiguing task, especially for tablets where users physically employ a stylus or finger touch. As our eyes are naturally involved in visual search and acquisition of desired menu items, we propose gaze to shortcut the physical movement. We investigate gaze-based mode-switching for menus in tablets by a novel mode-switching methodology, assessing a gaze-only (dwell-time) and multimodal (gaze and tap) technique, compared to hand-based interaction. The results suggest that users can efficiently alternate between manual and eye input when interacting with the menu; both gaze-based techniques have lower physical demand and individual speed-error trade-offs. This led to a novel technique that substantially reduces time by unifying mode-selection and mode-application. Our work points to new roles for our eyes to efficiently short-cut menu actions during the workflow.
@InProceedings{fleischhauer2023etra,
author = {Hu Fleischhauer, Yanfei and Surale, Hemant Bhaskar and Alt, Florian and Pfeuffer, Ken},
booktitle = {{Proceedings of the 2023 ACM Symposium on Eye Tracking Research \& Applications}},
title = {Gaze-Based Mode-Switching to Enhance Interaction with Menus on Tablets},
year = {2023},
address = {New York, NY, USA},
note = {fleischhauer2023etra},
publisher = {Association for Computing Machinery},
series = {ETRA '23},
abstract = {In design work, a common task is the interaction with menus to change the drawing mode. Done frequently, this can become a tedious and fatiguing task, especially for tablets where users physically employ a stylus or finger touch. As our eyes are naturally involved in visual search and acquisition of desired menu items, we propose gaze to shortcut the physical movement. We investigate gaze-based mode-switching for menus in tablets by a novel mode-switching methodology, assessing a gaze-only (dwell-time) and multimodal (gaze and tap) technique, compared to hand-based interaction. The results suggest that users can efficiently alternate between manual and eye input when interacting with the menu; both gaze-based techniques have lower physical demand and individual speed-error trade-offs. This led to a novel technique that substantially reduces time by unifying mode-selection and mode-application. Our work points to new roles for our eyes to efficiently short-cut menu actions during the workflow.},
articleno = {7},
doi = {10.1145/3588015.3588409},
isbn = {9798400701504},
keywords = {pen, touch, gaze, mode switching, tablet, mobile device, menu interface},
location = {Tübingen, Germany},
numpages = {8},
timestamp = {2023.05.30},
url = {http://www.florian-alt.org/unibw/wp-content/publications/fleischhauer2023etra.pdf},
}
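Dwell-time selection, the gaze-only technique assessed here, reduces to tracking how long the gaze rests on one menu item. Below is a minimal sketch with an assumed 600 ms threshold, not the paper's parameters.

```python
# Minimal dwell-time selection sketch, assuming a stream of timestamped gaze
# samples already mapped to menu items. Threshold and data are illustrative.

DWELL_MS = 600  # gaze must rest on an item this long to trigger it

def dwell_select(samples):
    """samples: iterable of (timestamp_ms, item_or_None); yields selections."""
    current, since = None, None
    for t, item in samples:
        if item != current:
            current, since = item, t          # gaze moved to a new item
        elif item is not None and t - since >= DWELL_MS:
            yield item                        # dwell threshold reached
            current, since = None, None       # reset to avoid re-triggering

stream = [(0, "brush"), (200, "brush"), (650, "brush"), (700, "eraser")]
print(list(dwell_select(stream)))  # -> ['brush']
```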
O. Namnakani, P. Sinrattanavong, Y. Abdrabou, A. Bulling, F. Alt, and M. Khamis. Gazecast: using mobile devices to allow gaze-based interaction on public displays. In Proceedings of the 2023 ACM Symposium on Eye Tracking Research & Applications (ETRA ’23), Association for Computing Machinery, New York, NY, USA, 2023. doi:10.1145/3588015.3589663
[BibTeX] [Abstract] [PDF]
Gaze is promising for natural and spontaneous interaction with public displays, but current gaze-enabled displays require movement-hindering stationary eye trackers or cumbersome head-mounted eye trackers. We propose and evaluate GazeCast – a novel system that leverages users’ handheld mobile devices to allow gaze-based interaction with surrounding displays. In a user study (N = 20), we compared GazeCast to a standard webcam for gaze-based interaction using Pursuits. We found that while selection using GazeCast requires more time and physical demand, participants value GazeCast’s high accuracy and flexible positioning. We conclude by discussing how mobile computing can facilitate the adoption of gaze interaction with pervasive displays.
@InProceedings{namnakani2023cogain,
author = {Namnakani, Omar and Sinrattanavong, Penpicha and Abdrabou, Yasmeen and Bulling, Andreas and Alt, Florian and Khamis, Mohamed},
booktitle = {{Proceedings of the 2023 ACM Symposium on Eye Tracking Research \& Applications}},
title = {GazeCast: Using Mobile Devices to Allow Gaze-Based Interaction on Public Displays},
year = {2023},
address = {New York, NY, USA},
note = {namnakani2023cogain},
publisher = {Association for Computing Machinery},
series = {ETRA '23},
abstract = {Gaze is promising for natural and spontaneous interaction with public displays, but current gaze-enabled displays require movement-hindering stationary eye trackers or cumbersome head-mounted eye trackers. We propose and evaluate GazeCast – a novel system that leverages users’ handheld mobile devices to allow gaze-based interaction with surrounding displays. In a user study (N = 20), we compared GazeCast to a standard webcam for gaze-based interaction using Pursuits. We found that while selection using GazeCast requires more time and physical demand, participants value GazeCast’s high accuracy and flexible positioning. We conclude by discussing how mobile computing can facilitate the adoption of gaze interaction with pervasive displays.},
articleno = {92},
doi = {10.1145/3588015.3589663},
isbn = {9798400701504},
keywords = {Public Displays, Mobile Devices, Gaze Interaction, Pursuits, Eye Tracking},
location = {Tübingen, Germany},
numpages = {8},
timestamp = {2023.05.30},
url = {http://www.florian-alt.org/unibw/wp-content/publications/namnakani2023cogain.pdf},
}
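GazeCast builds on Pursuits, which selects the on-screen target whose known motion correlates best with the user's gaze. The sketch below shows that matching step on synthetic trajectories; the threshold and the axis-averaged Pearson correlation are common choices in Pursuits implementations, not necessarily the paper's exact parameters.

```python
# Sketch of the Pursuits principle that GazeCast builds on: each target moves
# along a known trajectory, and the target whose motion correlates most
# strongly with the gaze signal gets selected. Data here is synthetic.

import numpy as np

def pursuits_select(gaze_x, gaze_y, targets, threshold=0.8):
    """targets maps a name to (x(t), y(t)) sampled like the gaze signal."""
    best, best_r = None, threshold
    for name, (tx, ty) in targets.items():
        # Average the Pearson correlation over both axes.
        r = (np.corrcoef(gaze_x, tx)[0, 1] + np.corrcoef(gaze_y, ty)[0, 1]) / 2
        if r > best_r:
            best, best_r = name, r
    return best

t = np.linspace(0, 2 * np.pi, 120)
targets = {"circle": (np.cos(t), np.sin(t)),
           "diagonal": (t, 0.5 * t)}
gaze_x = np.cos(t) + 0.1 * np.random.randn(120)  # noisy gaze following circle
gaze_y = np.sin(t) + 0.1 * np.random.randn(120)
print(pursuits_select(gaze_x, gaze_y, targets))  # -> circle
```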
Y. Abdrabou, F. Dietz, A. Shams, P. Knierim, Y. Abdelrahman, K. Pfeuffer, M. Hassib, and F. Alt, Revealing the hidden effects of phishing emails: an analysis of eye and mouse movements in email sorting tasks, 2023. doi:10.48550/arXiv.2305.17044
[BibTeX] [Abstract] [PDF]
Users are the last line of defense as phishing emails pass filter mechanisms. At the same time, phishing emails are designed so that they are challenging to identify by users. To this end, attackers employ techniques, such as eliciting stress, targeting helpfulness, or exercising authority, due to which users often miss being manipulated out of malicious intent. This work builds on the assumption that manipulation techniques, even if going unnoticed by users, still lead to changes in their behavior. In this work, we present the outcomes of an online study in which we collected gaze and mouse movement data during an email sorting task. Our findings show that phishing emails lead to significant differences across behavioral features but depend on the nature of the email. We discuss how our findings can be leveraged to build security mechanisms protecting users and companies from phishing.
@Misc{abdrabou2023arxiv,
author = {Yasmeen Abdrabou and Felix Dietz and Ahmed Shams and Pascal Knierim and Yomna Abdelrahman and Ken Pfeuffer and Mariam Hassib and Florian Alt},
howpublished = {arXiv.org},
month = jun,
note = {abdrabou2023arxiv},
title = {Revealing the Hidden Effects of Phishing Emails: An Analysis of Eye and Mouse Movements in Email Sorting Tasks},
year = {2023},
abstract = {Users are the last line of defense as phishing emails pass filter mechanisms. At the same time, phishing emails are designed so that they are challenging to identify by users. To this end, attackers employ techniques, such as eliciting stress, targeting helpfulness, or exercising authority, due to which users often miss being manipulated out of malicious intent. This work builds on the assumption that manipulation techniques, even if going unnoticed by users, still lead to changes in their behavior. In this work, we present the outcomes of an online study in which we collected gaze and mouse movement data during an email sorting task. Our findings show that phishing emails lead to significant differences across behavioral features but depend on the nature of the email. We discuss how our findings can be leveraged to build security mechanisms protecting users and companies from phishing.},
archiveprefix = {arXiv},
doi = {10.48550/arXiv.2305.17044},
eprint = {2305.17044},
primaryclass = {cs.HC},
timestamp = {2023.05.23},
url = {http://florian-alt.org/unibw/wp-content/publications/abdrabou2023arxiv.pdf},
}
A. Saad, K. Izadi, A. A. Khan, P. Knierim, S. Schneegass, F. Alt, and Y. Abdelrahman. ‘HotFoot’: Foot-Based User Identification Using Thermal Imaging. In Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems (CHI ’23), Association for Computing Machinery, New York, NY, USA, 2023. doi:10.1145/3544548.3580924
[BibTeX] [Abstract] [PDF] [Video]
We propose a novel method for seamlessly identifying users by combining thermal and visible feet features. While it is known that users’ feet have unique characteristics, these have so far been underutilized for biometric identification, as observing those features often requires the removal of shoes and socks. As thermal cameras are becoming ubiquitous, we foresee a new form of identification, using feet features and heat traces to reconstruct the footprint even while wearing shoes or socks. We collected a dataset of users’ feet (N = 21), wearing three types of footwear (personal shoes, standard shoes, and socks) on three floor types (carpet, laminate, and linoleum). By combining visual and thermal features, an AUC between 91.1% and 98.9%, depending on floor type and shoe type, can be achieved, with personal shoes on linoleum floor performing best. Our findings demonstrate the potential of thermal imaging for continuous and unobtrusive user identification.
@InProceedings{saad2023chi,
author = {Alia Saad AND Kian Izadi AND Khan, Anam Ahmad AND Pascal Knierim AND Stefan Schneegass AND Florian Alt AND Yomna Abdelrahman},
booktitle = {{Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems}},
title = {{`HotFoot': Foot-Based User Identification Using Thermal Imaging}},
year = {2023},
address = {New York, NY, USA},
note = {saad2023chi},
publisher = {Association for Computing Machinery},
series = {CHI '23},
abstract = {We propose a novel method for seamlessly identifying users by combining thermal and visible feet features. While it is known that users’ feet have unique characteristics, these have so far been underutilized for biometric identification, as observing those features often requires the removal of shoes and socks. As thermal cameras are becoming ubiquitous, we foresee a new form of identification, using feet features and heat traces to reconstruct the footprint even while wearing shoes or socks. We collected a dataset of users’ feet (N = 21), wearing three types of footwear (personal shoes, standard shoes, and socks) on three floor types (carpet, laminate, and linoleum). By combining visual and thermal features, an AUC between 91.1\% and 98.9\%, depending on floor type and shoe type can be achieved, with personal shoes on linoleum floor performing best. Our findings demonstrate the potential of thermal imaging for continuous and unobtrusive user identification.},
doi = {10.1145/3544548.3580924},
isbn = {9781450394215},
location = {Hamburg, Germany},
timestamp = {2023.05.03},
url = {http://florian-alt.org/unibw/wp-content/publications/saad2023chi.pdf},
video = {saad2023chi},
}
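The AUC figures reported above compare matching-score distributions for genuine and other users' samples. The snippet below illustrates that metric on synthetic scores, with a naive score-averaging fusion standing in for the paper's actual visual-thermal combination.

```python
# Illustrative AUC computation for fused visual and thermal matching scores,
# echoing the paper's evaluation metric. The synthetic data and the simple
# score-averaging fusion are assumptions, not the authors' method.

import numpy as np
from sklearn.metrics import roc_auc_score

rng = np.random.default_rng(1)
y_true = rng.integers(0, 2, size=200)        # 1 = genuine user, 0 = other

# Per-modality matching scores: genuine samples score higher on average.
visual = rng.normal(loc=y_true * 1.0, scale=1.0)
thermal = rng.normal(loc=y_true * 1.2, scale=1.0)

fused = (visual + thermal) / 2               # naive score-level fusion
for name, score in [("visual", visual), ("thermal", thermal), ("fused", fused)]:
    print(f"{name:8s} AUC = {roc_auc_score(y_true, score):.3f}")
```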
R. Radiah, D. Roth, F. Alt, and Y. Abdelrahman. The influence of avatar personalization on emotions in vr. Multimodal technologies and interaction, vol. 7, iss. 4, 2023. doi:10.3390/mti7040038
[BibTeX] [Abstract] [PDF]
In this paper, we investigate the impact of avatar personalization on perceived emotions. Avatar embodiment is a crucial aspect of collaborative and social virtual reality (VR) systems. Previous research found that avatar appearance impacts the acceptability of the virtual body and changes users’ behavior. While virtual embodiment has been extensively investigated, we know very little about how embodiment affects users’ experienced emotions. In a user study (N = 40), we applied an autobiographical recall method to evoke happiness and investigated the influence of different types of avatar embodiment (personalized same-gender, personalized opposite-gender, non-personalized same-gender, and non-personalized opposite-gender) on participants’ perceived emotion. We recorded both self-reported assessments and physiological data to observe participants’ emotional responses resulting from the emotions elicited by the use of different avatars. We found significant differences in happiness with the personalized same-gender avatar and the personalized opposite-gender avatar. We provide empirical evidence, demonstrating the influence of avatar personalization on emotions in VR. We conclude with recommendations for users and designers of virtual reality experiences.
@Article{rivu2023mti,
author = {Radiah, Rivu and Roth, Daniel and Alt, Florian and Abdelrahman, Yomna},
journal = {Multimodal Technologies and Interaction},
title = {The Influence of Avatar Personalization on Emotions in VR},
year = {2023},
issn = {2414-4088},
note = {rivu2023mti},
number = {4},
volume = {7},
abstract = {In this paper, we investigate the impact of avatar personalization on perceived emotions. Avatar embodiment is a crucial aspect of collaborative and social virtual reality (VR) systems. Previous research found that avatar appearance impacts the acceptability of the virtual body and changes users’ behavior. While virtual embodiment has been extensively investigated, we know very little about how embodiment affects users’ experienced emotions. In a user study (N = 40), we applied an autobiographical recall method to evoke happiness and investigated the influence of different types of avatar embodiment (personalized same-gender, personalized opposite-gender, non-personalized same-gender, and non-personalized opposite-gender) on participants’ perceived emotion. We recorded both self-reported assessments and physiological data to observe participants’ emotional responses resulting from the emotions elicited by the use of different avatars. We found significant differences in happiness with the personalized same-gender avatar and the personalized opposite-gender avatar. We provide empirical evidence, demonstrating the influence of avatar personalization on emotions in VR. We conclude with recommendations for users and designers of virtual reality experiences.},
article-number = {38},
doi = {10.3390/mti7040038},
timestamp = {2023.04.20},
url = {http://www.florian-alt.org/unibw/wp-content/publications/rivu2023mti.pdf},
}
L. Mecke, I. Prieto Romero, S. Delgado Rodriguez, and F. Alt. Exploring the use of electromagnets to influence key targeting on physical keyboards. In Extended abstracts of the 2023 chi conference on human factors in computing systems (CHI EA ’23), Association for Computing Machinery, New York, NY, USA, 2023. doi:10.1145/3544549.3585703
[BibTeX] [Abstract] [PDF]
In this work, we explore the use of force induced through electromagnets to influence finger movement while using a keyboard. To achieve this we generate a magnetic field below a keyboard and place a permanent magnet on the user’s finger as a minimally invasive approach to dynamically induce variable force. Contrary to other approaches our setup can thus generate forces even at a distance from the keyboard. We explore this concept by building a prototype and analyzing different configurations of electromagnets (i.e., attraction and repulsion) and placements of a permanent magnet on the user’s fingers in a preliminary study (N=4). Our force measurements show that we can induce 3.56 N at a distance of 10 mm. Placing the magnet on the index finger allowed for influencing key press times and was perceived as comfortable. Finally, we discuss implications and potential application areas like mid-air feedback and guidance.
@InProceedings{mecke2023chiea,
author = {Mecke, Lukas and Prieto Romero, Ismael and Delgado Rodriguez, Sarah and Alt, Florian},
booktitle = {Extended Abstracts of the 2023 CHI Conference on Human Factors in Computing Systems},
title = {Exploring the Use of Electromagnets to Influence Key Targeting on Physical Keyboards},
year = {2023},
address = {New York, NY, USA},
note = {mecke2023chiea},
publisher = {Association for Computing Machinery},
series = {CHI EA '23},
abstract = {In this work, we explore the use of force induced through electromagnets to influence finger movement while using a keyboard. To achieve this we generate a magnetic field below a keyboard and place a permanent magnet on the user’s finger as a minimally invasive approach to dynamically induce variable force. Contrary to other approaches our setup can thus generate forces even at a distance from the keyboard. We explore this concept by building a prototype and analyzing different configurations of electromagnets (i.e., attraction and repulsion) and placements of a permanent magnet on the user’s fingers in a preliminary study (N=4). Our force measurements show that we can induce 3.56 N at a distance of 10 mm. Placing the magnet on the index finger allowed for influencing key press times and was perceived as comfortable. Finally, we discuss implications and potential application areas like mid-air feedback and guidance.},
articleno = {128},
doi = {10.1145/3544549.3585703},
isbn = {9781450394222},
keywords = {output, typing, electromagnets, keyboards},
location = {Hamburg, Germany},
numpages = {8},
timestamp = {2023.04.20},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mecke2023chiws.pdf},
}
V. Distler, Y. Abdrabou, F. Dietz, and F. Alt. Triggering empathy out of malicious intent: the role of empathy in social engineering attacks. In Proceedings of the 2nd empathy-centric design workshop (EMPATHICH ’23), Association for Computing Machinery, New York, NY, USA, 2023. doi:10.1145/3588967.3588969
[BibTeX] [Abstract] [PDF]
Social engineering is a popular attack vector among cyber criminals. During such attacks, impostors often attempt to trigger empathy to manipulate victims into taking dangerous actions, for example, sharing their credentials or clicking on malicious email attachments. The objective of this position paper is to initiate a conversation on the tension between positive and negative aspects of empathy in HCI as it pertains to security-relevant behaviors. To this end, we focus on the malicious ways in which empathy can be instrumentalized in social engineering. We describe examples of such empathy-related social engineering attacks, explore potential solutions (including the automated detection of empathy-triggering communication, or of empathetic communication on the part of a potential victim), and discuss technical, social as well as organizational interventions. We highlight research challenges and directions for future work.
@InProceedings{distler2023empatichi,
author = {Distler, Verena and Abdrabou, Yasmeen and Dietz, Felix and Alt, Florian},
booktitle = {Proceedings of the 2nd Empathy-Centric Design Workshop},
title = {Triggering Empathy out of Malicious Intent: The Role of Empathy in Social Engineering Attacks},
year = {2023},
address = {New York, NY, USA},
note = {distler2023empatichi},
publisher = {Association for Computing Machinery},
series = {EMPATHICH '23},
abstract = {Social engineering is a popular attack vector among cyber criminals. During such attacks, impostors often attempt to trigger empathy to manipulate victims into taking dangerous actions, for example, sharing their credentials or clicking on malicious email attachments. The objective of this position paper is to initiate a conversation on the tension between positive and negative aspects of empathy in HCI as it pertains to security-relevant behaviors. To this end, we focus on the malicious ways in which empathy can be instrumentalized in social engineering. We describe examples of such empathy-related social engineering attacks, explore potential solutions (including the automated detection of empathy-triggering communication, or of empathetic communication on the part of a potential victim), and discuss technical, social as well as organizational interventions. We highlight research challenges and directions for future work.},
articleno = {2},
doi = {10.1145/3588967.3588969},
isbn = {9798400707490},
keywords = {Social Engineering, Empathy, Security},
location = {Hamburg, Germany},
numpages = {6},
timestamp = {2023.04.20},
url = {https://electrofab.prototyping.id/assets/papers/distler2023empatichi.pdf},
}
J. Liebers, U. Gruenefeld, D. Buschek, F. Alt, and S. Schneegass. Introduction to authentication using behavioral biometrics. In Extended abstracts of the 2023 chi conference on human factors in computing systems (CHI EA ’23), Association for Computing Machinery, New York, NY, USA, 2023. doi:10.1145/3544549.3574190
[BibTeX] [Abstract] [PDF]
The trend of ubiquitous computing goes in parallel with ubiquitous authentication, as users must confirm their identity several times a day on their devices. Passwords are increasingly superseded by biometrics for their inherent drawbacks, and Behavioral Biometrics are particularly promising for increased usability and user experience. This course provides participants with an introduction to the overall topic, covering all phases of creating novel authentication schemes. We introduce important aspects of evaluating Behavioral Biometrics and provide an overview of technical machine-learning techniques in a hands-on session, inviting practitioners and researchers to extend their knowledge of Behavioral Biometrics.
@InProceedings{liebers2023chiea,
author = {Liebers, Jonathan and Gruenefeld, Uwe and Buschek, Daniel and Alt, Florian and Schneegass, Stefan},
booktitle = {Extended Abstracts of the 2023 CHI Conference on Human Factors in Computing Systems},
title = {Introduction to Authentication Using Behavioral Biometrics},
year = {2023},
address = {New York, NY, USA},
note = {liebers2023chiea},
publisher = {Association for Computing Machinery},
series = {CHI EA '23},
abstract = {The trend of ubiquitous computing goes in parallel with ubiquitous authentication, as users must confirm their identity several times a day on their devices. Passwords are increasingly superseded by biometrics for their inherent drawbacks, and Behavioral Biometrics are particularly promising for increased usability and user experience. This course provides participants with an introduction to the overall topic, covering all phases of creating novel authentication schemes. We introduce important aspects of evaluating Behavioral Biometrics and provide an overview of technical machine-learning techniques in a hands-on session, inviting practitioners and researchers to extend their knowledge of Behavioral Biometrics.},
articleno = {547},
doi = {10.1145/3544549.3574190},
isbn = {9781450394222},
keywords = {human-computer interaction, identification, usable security, machine learning, authentication},
location = {Hamburg, Germany},
numpages = {4},
timestamp = {2023.04.20},
url = {http://www.florian-alt.org/unibw/wp-content/publications/lieber2023chiea.pdf},
}
S. Delgado Rodriguez, O. Hein, I. P. Romero, L. Mecke, F. Dietz, S. Prange, and F. Alt. Shake-it-all: a toolkit for sensing tangible interactions on everyday objects. In Workshop beyond prototyping boards: future paradigms for electronics toolkits (CHI’23 Workshops), 2023.
[BibTeX] [PDF]
@InProceedings{delgado2023chiws,
author = {Delgado Rodriguez, Sarah and Hein, Oliver and Romero, Ismael Prieto and Mecke, Lukas and Dietz, Felix and Prange, Sarah and Alt, Florian},
booktitle = {Workshop Beyond Prototyping Boards: Future Paradigms for Electronics Toolkits},
title = {Shake-It-All: A Toolkit for Sensing Tangible Interactions on Everyday Objects},
year = {2023},
note = {delgado2023chiws},
series = {CHI'23 Workshops},
timestamp = {2023.04.19},
url = {http://www.florian-alt.org/unibw/wp-content/publications/delgado2023chiws.pdf},
}
S. Delgado Rodriguez, R. Rivu, V. Mäkelä, and F. Alt. Challenges in virtual reality studies: ethics and internal and external validity. In Proceedings of the augmented humans international conference 2023 (AHs ’23), Association for Computing Machinery, New York, NY, USA, 2023, p. 105–111. doi:10.1145/3582700.3582716
[BibTeX] [Abstract] [PDF]
User studies on human augmentation nowadays frequently involve virtual reality (VR) technology. This is because VR studies allow augmentations of the human body or senses to be evaluated virtually without having to develop elaborate physical prototypes. However, there are many challenges in VR studies that stem from a multitude of factors. In this paper, we first discuss different types of VR studies and suggest high-level terminology to facilitate further discussions in this space. Then, we derive challenges from the literature that researchers might face when conducting research with VR technology. In particular, we discuss ethics, internal validity, external validity, the technological capabilities of VR hardware, and the costs of VR studies. We further discuss how the challenges might apply to different types of VR studies, and formulate recommendations.
@InProceedings{delgado2023ahs,
author = {Delgado Rodriguez, Sarah and Rivu, Radiah and M\"{a}kel\"{a}, Ville and Alt, Florian},
booktitle = {Proceedings of the Augmented Humans International Conference 2023},
title = {Challenges in Virtual Reality Studies: Ethics and Internal and External Validity},
year = {2023},
address = {New York, NY, USA},
note = {delgado2023ahs},
pages = {105–111},
publisher = {Association for Computing Machinery},
series = {AHs '23},
abstract = {User studies on human augmentation nowadays frequently involve virtual reality (VR) technology. This is because VR studies allow augmentations of the human body or senses to be evaluated virtually without having to develop elaborate physical prototypes. However, there are many challenges in VR studies that stem from a multitude of factors. In this paper, we first discuss different types of VR studies and suggest high-level terminology to facilitate further discussions in this space. Then, we derive challenges from the literature that researchers might face when conducting research with VR technology. In particular, we discuss ethics, internal validity, external validity, the technological capabilities of VR hardware, and the costs of VR studies. We further discuss how the challenges might apply to different types of VR studies, and formulate recommendations.},
doi = {10.1145/3582700.3582716},
isbn = {9781450399845},
keywords = {Ethics, User Study, Virtual Reality, Validity, Virtual Reality Studies, Challenges},
location = {Glasgow, United Kingdom},
numpages = {7},
timestamp = {2023.03.14},
url = {http://www.florian-alt.org/unibw/wp-content/publications/delgado2023ahs.pdf},
}
Y. Abdrabou, E. Karypidou, F. Alt, and M. Hassib. Investigating User Behaviour Towards Fake News on Social Media Using Eye Tracking and Mouse Movements. In Proceedings of the Usable Security Mini Conference 2023 (USEC’23), Internet Society, San Diego, CA, USA, 2023. doi:10.14722/usec.2023.232041
[BibTeX] [Abstract] [PDF]
We propose an approach to identify users’ exposure to fake news from users’ gaze and mouse movement behavior. Our approach is meant as an enabler for interventions that make users aware of engaging with fake news while not being consciously aware of this. Our work is motivated by the rapid spread of fake news on the web (in particular, social media) and the difficulty and effort required to identify fake content, either technically or by means of a human fact checker. To this end, we set out with conducting a remote online study (N=54) in which participants were exposed to real and fake social media posts while their mouse and gaze movements were recorded. We identify the most predictive gaze and mouse movement features and show that fake news can be predicted with 68.4% accuracy from users’ gaze and mouse movement behavior. Our work is complemented by discussing the implications of using behavioral features for mitigating the spread of fake news on social media.
@InProceedings{abdrabou2023usec,
author = {Yasmeen Abdrabou AND Elisaveta Karypidou AND Florian Alt AND Mariam Hassib},
booktitle = {{Proceedings of the Usable Security Mini Conference 2023}},
title = {{Investigating User Behaviour Towards Fake News on Social Media Using Eye Tracking and Mouse Movements}},
year = {2023},
address = {San Diego, CA, USA},
note = {abdrabou2023usec},
publisher = {Internet Society},
series = {USEC'23},
abstract = {We propose an approach to identify users' exposure to fake news from users' gaze and mouse movement behavior. Our approach is meant as an enabler for interventions that make users aware of engaging with fake news while not being consciously aware of this. Our work is motivated by the rapid spread of fake news on the web (in particular, social media) and the difficulty and effort required to identify fake content, either technically or by means of a human fact checker. To this end, we set out with conducting a remote online study ($N=54$) in which participants were exposed to real and fake social media posts while their mouse and gaze movements were recorded. We identify the most predictive gaze and mouse movement features and show that fake news can be predicted with 68.4\% accuracy from users' gaze and mouse movement behavior. Our work is complemented by discussing the implications of using behavioral features for mitigating the spread of fake news on social media.},
doi = {10.14722/usec.2023.232041},
isbn = {1-891562-91-6},
owner = {florian},
timestamp = {2023.02.27},
url = {http://www.florian-alt.org/unibw/wp-content/publications/abdrabou2023usec.pdf},
}
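Studies like this derive behavioral features from raw cursor logs before any classification. The following sketch computes a few typical mouse features (speed, hesitations, path straightness); the feature set is illustrative and does not reproduce the paper's.

```python
# Sketch of the kind of mouse-movement features such studies derive from raw
# cursor logs. The concrete features and thresholds here are illustrative.

import math

def mouse_features(trace):
    """trace: list of (t_seconds, x, y) cursor samples."""
    speeds, pauses = [], 0
    for (t0, x0, y0), (t1, x1, y1) in zip(trace, trace[1:]):
        d = math.hypot(x1 - x0, y1 - y0)
        speeds.append(d / max(t1 - t0, 1e-6))
        if d < 1.0:                      # barely moving: count as hesitation
            pauses += 1
    direct = math.hypot(trace[-1][1] - trace[0][1], trace[-1][2] - trace[0][2])
    path = sum(math.hypot(b[1] - a[1], b[2] - a[2])
               for a, b in zip(trace, trace[1:]))
    return {
        "mean_speed": sum(speeds) / len(speeds),
        "pause_count": pauses,
        "straightness": direct / max(path, 1e-6),  # 1.0 = perfectly direct
    }

trace = [(0.0, 0, 0), (0.1, 5, 2), (0.2, 5, 2), (0.3, 40, 30)]
print(mouse_features(trace))
```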
S. Mansour, P. Knierim, J. O’Hagan, F. Alt, and F. Mathis. BANS: Evaluation of Bystander Awareness Notification Systems for Productivity in VR. In Proceedings of the Usable Security Mini Conference 2023 (USEC’23), Internet Society, San Diego, CA, USA, 2023. doi:10.14722/usec.2023.234566
[BibTeX] [Abstract] [PDF]
VR Head-Mounted Displays (HMDs) provide unlimited and personalized virtual workspaces and will enable working anytime and anywhere. However, if HMDs are to become ubiquitous, VR users are at risk of being observed, which can threaten their privacy. We examine six Bystander Awareness Notification Systems (BANS) to enhance VR users’ bystander awareness whilst immersed in VR. In a user study (N=28), we explore how future HMDs equipped with BANS might enable users to maintain their privacy while contributing towards enjoyable and productive travels. Results indicate that BANS increase VR users’ bystander awareness without affecting presence and productivity. Users prefer BANS that extract and present the most details of reality to facilitate their bystander awareness. We conclude by synthesizing four recommendations, such as providing VR users with control over BANS and considering how VR users can best transition between realities, to inform the design of privacy-preserving HMDs.
@InProceedings{mansour2023usec,
author = {Shady Mansour AND Pascal Knierim AND Joseph O'Hagan AND Florian Alt AND Florian Mathis},
booktitle = {{Proceedings of the Usable Security Mini Conference 2023}},
title = {{BANS: Evaluation of Bystander Awareness Notification Systems for Productivity in VR}},
year = {2023},
address = {San Diego, CA, USA},
note = {mansour2023usec},
publisher = {Internet Society},
series = {USEC'23},
abstract = {VR Head-Mounted Displays (HMDs) provide unlimited and personalized virtual workspaces and will enable working anytime and anywhere. However, if HMDs are to become ubiquitous, VR users are at risk of being observed, which can threaten their privacy. We examine six \underline{B}ystander \underline{A}wareness \underline{N}otification \underline{S}ystems (BANS) to enhance VR users' bystander awareness whilst immersed in VR. In a user study (N=28), we explore how future HMDs equipped with BANS might enable users to maintain their privacy while contributing towards enjoyable and productive travels. Results indicate that BANS increase VR users' bystander awareness without affecting presence and productivity. Users prefer BANS that extract and present the most details of reality to facilitate their bystander awareness. We conclude by synthesizing four recommendations, such as providing VR users with control over BANS and considering how VR users can best transition between realities, to inform the design of privacy-preserving HMDs.},
doi = {10.14722/usec.2023.234566},
isbn = {1-891562-91-6},
owner = {florian},
timestamp = {2023.02.26},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mansour2023usec.pdf},
}
M. Froehlich, J. Vega, A. Pahl, S. Lotz, F. Alt, A. Schmidt, and I. Welpe. Prototyping with blockchain: a case study for teaching blockchain application development at university. In Learning in the age of digital and green transition (ICL ’22), Springer International Publishing, Cham, 2023, p. 1005–1017. doi:10.1007/978-3-031-26876-2_94
[BibTeX] [Abstract] [PDF]
Blockchain technology is believed to have a potential for innovation comparable to the early internet. However, it is difficult to understand, learn, and use. A particular challenge for teaching software engineering of blockchain applications is identifying suitable use cases: When does a decentralized application running on smart contracts offer advantages over a classic distributed software architecture? This question extends the realms of software engineering and connects to fundamental economic aspects of ownership and incentive systems. The lack of usability of today’s blockchain applications indicates that often applications without a clear advantage are developed. At the same time, there exists little information for educators on how to teach applied blockchain application development. We argue that an interdisciplinary teaching approach can address these issues and equip the next generation of blockchain developers with the skills and entrepreneurial mindset to build valuable and usable products. To this end, we developed, conducted, and evaluated an interdisciplinary capstone-like course grounded in the design sprint method with N = 11 graduate students. Our pre-/post evaluation indicates high efficacy: Participants improved across all measured learning dimensions, particularly use-case identification and blockchain prototyping in teams. We contribute the syllabus, a detailed evaluation, and lessons learned for educators.
@InProceedings{froehlich2022icl,
author = {Froehlich, Michael and Vega, Jose and Pahl, Amelie and Lotz, Sergej and Alt, Florian and Schmidt, Albrecht and Welpe, Isabell},
booktitle = {Learning in the Age of Digital and Green Transition},
title = {Prototyping with Blockchain: A Case Study for Teaching Blockchain Application Development at University},
year = {2023},
note = {froehlich2022icl},
address = {Cham},
editor = {Auer, Michael E. and Pachatz, Wolfgang and R{\"u}{\"u}tmann, Tiia},
pages = {1005--1017},
publisher = {Springer International Publishing},
series = {ICL '22},
abstract = {Blockchain technology is believed to have a potential for innovation comparable to the early internet. However, it is difficult to understand, learn, and use. A particular challenge for teaching software engineering of blockchain applications is identifying suitable use cases: When does a decentralized application running on smart contracts offer advantages over a classic distributed software architecture? This question extends the realms of software engineering and connects to fundamental economic aspects of ownership and incentive systems. The lack of usability of today's blockchain applications indicates that often applications without a clear advantage are developed. At the same time, there exists little information for educators on how to teach applied blockchain application development. We argue that an interdisciplinary teaching approach can address these issues and equip the next generation of blockchain developers with the skills and entrepreneurial mindset to build valuable and usable products. To this end, we developed, conducted, and evaluated an interdisciplinary capstone-like course grounded in the design sprint method with N = 11 graduate students. Our pre-/post evaluation indicates high efficacy: Participants improved across all measured learning dimensions, particularly use-case identification and blockchain prototyping in teams. We contribute the syllabus, a detailed evaluation, and lessons learned for educators.},
doi = {10.1007/978-3-031-26876-2_94},
isbn = {978-3-031-26876-2},
timestamp = {2022.12.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/froehlich2022icl.pdf},
}

2022

S. Delgado Rodriguez, S. Prange, P. Knierim, K. Marky, and F. Alt. Experiencing tangible privacy control for smart homes with prikey. In Proceedings of the 21st international conference on mobile and ubiquitous multimedia (MUM ’22), Association for Computing Machinery, New York, NY, USA, 2022, p. 298–300. doi:10.1145/3568444.3570585
[BibTeX] [Abstract] [PDF]
Existing software-based smart home privacy mechanisms are frequently indirect and cumbersome to use. We developed PriKey, a tangible privacy mechanism for smart homes that offers intuitive, device-independent, sensor-based, and user-centric privacy control. To render our concept comprehensible, we implemented a demonstration consisting of Wizard-of-Oz prototypes that show the envisioned form factor, size, and portability of our system, as well as a larger functional prototype of PriKey, which enables control of privacy-invasive sensors integrated into two exemplary smart devices, i.e., a smart speaker and a tablet.
@InProceedings{delgado2022mumadj,
author = {Delgado Rodriguez, Sarah and Prange, Sarah and Knierim, Pascal and Marky, Karola and Alt, Florian},
booktitle = {Proceedings of the 21st International Conference on Mobile and Ubiquitous Multimedia},
title = {Experiencing Tangible Privacy Control for Smart Homes with PriKey},
year = {2022},
address = {New York, NY, USA},
note = {delgado2022mumadj},
pages = {298–300},
publisher = {Association for Computing Machinery},
series = {MUM '22},
abstract = {Existing software-based smart home privacy mechanisms are frequently indirect and cumbersome to use. We developed PriKey, a tangible privacy mechanism for smart homes that offers intuitive, device-independent, sensor-based, and user-centric privacy control. To render our concept comprehensible, we implemented a demonstration consisting of Wizard-of-Oz prototypes that show the envisioned form factor, size, and portability of our system, as well as a larger functional prototype of PriKey, which enables control of privacy-invasive sensors integrated into two exemplary smart devices, i.e., a smart speaker and a tablet.},
doi = {10.1145/3568444.3570585},
isbn = {9781450398206},
keywords = {tangible, bystander, privacy, tangible privacy, smart home},
location = {Lisbon, Portugal},
numpages = {3},
timestamp = {2022.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/delgado2022mumadj.pdf},
}
R. Rivu, H. Bayerl, P. Knierim, and F. Alt. ‘can you set it up on your own?’ – investigating users’ ability to participate in remote-based virtual reality studies. In Proceedings of the 21st international conference on mobile and ubiquitous multimedia (MUM ’22), Association for Computing Machinery, New York, NY, USA, 2022, p. 121–127. doi:10.1145/3568444.3568462
[BibTeX] [Abstract] [PDF] [Talk]
The availability of consumer-grade virtual reality (VR) devices allows user studies to be conducted remotely, that is, in users’ homes. In this way, diverse populations can be reached and studies using virtual reality can be conducted in settings characterized by high ecological validity. In this study (N=21) we investigate challenges participants face as they are required to set up and calibrate a virtual reality system in their home without assistance from experimenters. This allowed us to identify key reasons why participants struggle with this task. Our findings suggest that providing illustrative instructions and additional assistance on request can notably increase the success rate of setting up a VR environment for participating in a remote study. Interestingly, we also find that it is harder to recruit participants who do not have prior VR experience to participate in remote VR studies. We derive suggestions on how to support this task based on our findings.
@InProceedings{rivu2022mum,
author = {Rivu, Radiah and Bayerl, Helena and Knierim, Pascal and Alt, Florian},
booktitle = {Proceedings of the 21st International Conference on Mobile and Ubiquitous Multimedia},
title = {‘Can You Set It Up On Your Own?’ – Investigating Users’ Ability To Participate in Remote-Based Virtual Reality Studies},
year = {2022},
address = {New York, NY, USA},
note = {rivu2022mum},
pages = {121–127},
publisher = {Association for Computing Machinery},
series = {MUM '22},
abstract = {The availability of consumer-grade virtual reality (VR) devices allows user studies to be conducted remotely, that is, in users’ homes. In this way, diverse populations can be reached and studies using virtual reality can be conducted in settings characterized by high ecological validity. In this study (N=21) we investigate challenges participants face as they are required to set up and calibrate a virtual reality system in their home without assistance from experimenters. This allowed us to identify key reasons why participants struggle with this task. Our findings suggest that providing illustrative instructions and additional assistance on request can notably increase the success rate of setting up a VR environment for participating in a remote study. Interestingly, we also find that it is harder to recruit participants who do not have prior VR experience to participate in remote VR studies. We derive suggestions on how to support this task based on our findings.},
doi = {10.1145/3568444.3568462},
isbn = {9781450398206},
keywords = {Virtual Reality, Remote User Study},
location = {Lisbon, Portugal},
numpages = {7},
talk = {alt2022mum},
timestamp = {2022.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/rivu2022mum.pdf},
}
S. Prange, S. D. Rodriguez, L. Mecke, and F. Alt. “i saw your partner naked”: exploring privacy challenges during video-based online meetings. In Proceedings of the 21st international conference on mobile and ubiquitous multimedia (MUM ’22), Association for Computing Machinery, New York, NY, USA, 2022, p. 71–82. doi:10.1145/3568444.3568468
[BibTeX] [Abstract] [PDF]
Video-based online meetings and, ultimately, the amount of private information that is shared – intentionally or accidentally – increased as a result of the COVID-19 pandemic. For example, online teaching might reveal lecturers’ private environment to students or business meetings might provide insights about employees’ family relationships. This raises the need to understand users’ perception towards privacy intrusion during online video conferences to inform concepts that better protect meeting participants’ privacy. We present the results of an online survey (N = 140) in which we investigate user stories of privacy-invasive situations in their homes during such meetings. Our results show that online meetings reveal private information that would not have become available during physical meetings. This often involves third parties (e.g., children, spouse, colleague), who might not even be aware of this. We discuss potential means to support users in protecting their and others’ privacy before, during, and after video-based online meetings.
@InProceedings{prange2022mum,
author = {Prange, Sarah and Rodriguez, Sarah Delgado and Mecke, Lukas and Alt, Florian},
booktitle = {Proceedings of the 21st International Conference on Mobile and Ubiquitous Multimedia},
title = {“I Saw Your Partner Naked”: Exploring Privacy Challenges During Video-Based Online Meetings},
year = {2022},
address = {New York, NY, USA},
note = {prange2022mum},
pages = {71–82},
publisher = {Association for Computing Machinery},
series = {MUM '22},
abstract = {Video-based online meetings and, ultimately, the amount of private information that is shared – intentionally or accidentally – increased as a result of the COVID-19 pandemic. For example, online teaching might reveal lecturers’ private environment to students or business meetings might provide insights about employees’ family relationships. This raises the need to understand users’ perception towards privacy intrusion during online video conferences to inform concepts that better protect meeting participants’ privacy. We present the results of an online survey (N = 140) in which we investigate user stories of privacy-invasive situations in their homes during such meetings. Our results show that online meetings reveal private information that would not have become available during physical meetings. This often involves third parties (e.g., children, spouse, colleague), who might not even be aware of this. We discuss potential means to support users in protecting their and others’ privacy before, during, and after video-based online meetings.},
doi = {10.1145/3568444.3568468},
isbn = {9781450398206},
keywords = {COVID-19, online meeting privacy, privacy, privacy mechanisms, online meetings},
location = {Lisbon, Portugal},
numpages = {12},
timestamp = {2022.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/prange2022mum.pdf},
}
T. Le, F. Dietz, K. Pfeuffer, and F. Alt. A practical method to eye-tracking on the phone: toolkit, accuracy and precision. In Proceedings of the 21st international conference on mobile and ubiquitous multimedia (MUM ’22), Association for Computing Machinery, New York, NY, USA, 2022, p. 182–188. doi:10.1145/3568444.3568463
[BibTeX] [Abstract] [PDF]
While eye-tracking has become a core asset for many computing environments, mobile phones, as a prime computing device, are lacking a practical platform to conduct eye-tracking studies and develop gaze applications easily. In this work, we aim to tackle this issue by investigating a system concept that allows for the deployment of remote eye-trackers for mobile devices. We describe a toolkit that supports eye-tracking in mobile apps based on a simple phone, PC, and remote eye tracker setup. We evaluate our approach through a technical evaluation of accuracy and precision in various user contexts important for mobility (sitting, standing, walking, lying). Our results show that eye-trackers can be easily used with high accuracy, and how accuracy is affected by the user’s body posture and motion. Our work paves the way for enabling easy-to-use eye-tracking studies on mobile devices.
@InProceedings{le2022mum,
author = {Le, Thanh and Dietz, Felix and Pfeuffer, Ken and Alt, Florian},
booktitle = {Proceedings of the 21st International Conference on Mobile and Ubiquitous Multimedia},
title = {A Practical Method to Eye-Tracking on the Phone: Toolkit, Accuracy and Precision},
year = {2022},
address = {New York, NY, USA},
note = {le2022mum},
pages = {182–188},
publisher = {Association for Computing Machinery},
series = {MUM '22},
abstract = {While eye-tracking has become a core asset for many computing environments, mobile phones, as a prime computing device, are lacking a practical platform to conduct eye-tracking studies and develop gaze applications easily. In this work, we aim to tackle this issue by investigating a system concept that allows for the deployment of remote eye-trackers for mobile devices. We describe a toolkit that supports eye-tracking in mobile apps based on a simple phone, PC, and remote eye tracker setup. We evaluate our approach through a technical evaluation of accuracy and precision in various user contexts important for mobility (sitting, standing, walking, lying). Our results show that eye-trackers can be easily used with high accuracy, and how accuracy is affected by the user's body posture and motion. Our work paves the way for enabling easy-to-use eye-tracking studies on mobile devices.},
doi = {10.1145/3568444.3568463},
isbn = {9781450398206},
keywords = {accuracy, toolkit, eye tracking, gaze detection, precision},
location = {Lisbon, Portugal},
numpages = {7},
timestamp = {2022.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/le2022mum.pdf},
}
S. Schneegass, A. Saad, R. Heger, S. Delgado Rodriguez, R. Poguntke, and F. Alt. An Investigation of Shoulder Surfing Attacks on Touch-Based Unlock Events. Proceedings of the acm on human-computer interaction, vol. 6, iss. MHCI, 2022. doi:10.1145/3546742
[BibTeX] [Abstract] [PDF]
This paper contributes to our understanding of user-centered attacks on smartphones. In particular, we investigate the likelihood of so-called shoulder surfing attacks during touch-based unlock events and provide insights into users’ views and perceptions. To do so, we ran a two-week in-the-wild study (N=12) in which we recorded images with a 180-degree field of view lens that was mounted on the smartphone’s front-facing camera. In addition, we collected contextual information and allowed participants to assess the situation. We found that only a small fraction of shoulder surfing incidents that occur during authentication are actually perceived as threatening. Furthermore, our findings suggest that our notions of (un)safe places need to be rethought. Our work is complemented by a discussion of implications for future user-centered attack-aware systems. This work can serve as a basis for usable security researchers to better design systems against user-centered attacks.
@Article{schneegass2022mobilehci,
author = {Schneegass, Stefan and Saad, Alia and Heger, Roman and Delgado Rodriguez, Sarah and Poguntke, Romina and Alt, Florian},
journal = {Proceedings of the ACM on Human-Computer Interaction},
title = {{An Investigation of Shoulder Surfing Attacks on Touch-Based Unlock Events}},
year = {2022},
month = {sep},
note = {schneegass2022mobilehci},
number = {MHCI},
volume = {6},
abstract = {This paper contributes to our understanding of user-centered attacks on smartphones. In particular, we investigate the likelihood of so-called shoulder surfing attacks during touch-based unlock events and provide insights into users' views and perceptions. To do so, we ran a two-week in-the-wild study (N=12) in which we recorded images with a 180-degree field of view lens that was mounted on the smartphone's front-facing camera. In addition, we collected contextual information and allowed participants to assess the situation. We found that only a small fraction of shoulder surfing incidents that occur during authentication are actually perceived as threatening. Furthermore, our findings suggest that our notions of (un)safe places need to be rethought. Our work is complemented by a discussion of implications for future user-centered attack-aware systems. This work can serve as a basis for usable security researchers to better design systems against user-centered attacks.},
address = {New York, NY, USA},
articleno = {207},
doi = {10.1145/3546742},
issue_date = {September 2022},
keywords = {shoulder surfing, usable security and privacy, user-centered attacks, in-the-wild studies.},
numpages = {14},
publisher = {Association for Computing Machinery},
timestamp = {2022.10.27},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schneegass2022mobilehci.pdf},
}
M. Froehlich, J. A. Vega Vermehren, F. Alt, and A. Schmidt. Implementation and evaluation of a point-of-sale payment system using bitcoin lightning. In Proceedings of the 12th Nordic Conference on Human-Computer Interaction (NordiCHI ’22), Association for Computing Machinery, New York, NY, USA, 2022. doi:10.1145/3546155.3546700
[BibTeX] [Abstract] [PDF]
Cryptocurrencies have the potential to improve financial inclusion. However, the technology is complex to understand and difficult to use. Human-Computer-Interaction (HCI) can play a vital role in improving accessibility by identifying and overcoming challenges that hold users back. However, most HCI studies have focused only on Bitcoin and Ethereum so far. Newer blockchains promise transaction speeds comparable to traditional payment systems, enabling the use of cryptocurrencies as a medium of exchange for everyday transactions. To explore the viability of cryptocurrency-based point-of-sale solutions through a human-centered lens, we used Bitcoin Lightning to implement a payment system and evaluated it in a mixed-methods study. Our results show that Bitcoin Lightning is a usable alternative to traditional solutions and that friction aggregates at the interface to existing payment systems, i.e. when purchasing Bitcoin. We discuss qualitative insights and derive implications for deploying cryptocurrencies as payment solutions.
@InProceedings{froehlich2022nordichi,
author = {Froehlich, Michael and Vega Vermehren, Jose Adrian and Alt, Florian and Schmidt, Albrecht},
booktitle = {{Proceedings of the 12th Nordic Conference on Human-Computer Interaction}},
title = {Implementation and Evaluation of a Point-Of-Sale Payment System Using Bitcoin Lightning},
year = {2022},
address = {New York, NY, USA},
note = {froehlich2022nordichi},
publisher = {Association for Computing Machinery},
series = {NordiCHI '22},
abstract = {Cryptocurrencies have the potential to improve financial inclusion. However, the technology is complex to understand and difficult to use. Human-Computer-Interaction (HCI) can play a vital role in improving accessibility by identifying and overcoming challenges that hold users back. However, most HCI studies have focused only on Bitcoin and Ethereum so far. Newer blockchains promise transaction speeds comparable to traditional payment systems, enabling the use of cryptocurrencies as a medium of exchange for everyday transactions. To explore the viability of cryptocurrency-based point-of-sale solutions through a human-centered lens, we used Bitcoin Lightning to implement a payment system and evaluated it in a mixed-methods study. Our results show that Bitcoin Lightning is a usable alternative to traditional solutions and that friction aggregates at the interface to existing payment systems, i.e. when purchasing Bitcoin. We discuss qualitative insights and derive implications for deploying cryptocurrencies as payment solutions.},
articleno = {16},
doi = {10.1145/3546155.3546700},
isbn = {9781450396998},
keywords = {cryptocurrency, payment study, point-of-sale, pos, bitcoin, bitcoin lightning, blockchain},
location = {Aarhus, Denmark},
numpages = {12},
timestamp = {2022.10.20},
url = {http://www.florian-alt.org/unibw/wp-content/publications/froehlich2022nordichi.pdf},
}
A. Esteves, E. Bouquet, K. Pfeuffer, and F. Alt. One-Handed Input for Mobile Devices via Motion Matching and Orbits Controls. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies, vol. 6, iss. 2, 2022. doi:10.1145/3534624
[BibTeX] [Abstract] [PDF]
We introduce a novel one-handed input technique for mobile devices that is not based on pointing, but on motion matching, where users select a target by mimicking its unique animation. Our work is motivated by the findings of a survey (N=201) on current mobile use, from which we identify lingering opportunities for one-handed input techniques. We follow by expanding on current motion matching implementations – previously developed in the context of gaze or mid-air input – so these take advantage of the affordances of touch-input devices. We validate the technique by characterizing user performance via a standard selection task (N=24) where we report success rates (>95%), selection times (~1.6 s), input footprint, grip stability, usability, and subjective workload – in both phone and tablet conditions. Finally, we present a design space that illustrates six ways in which motion matching can be embedded into mobile interfaces via a camera prototype application.
@Article{esteves2022imwut,
author = {Esteves, Augusto and Bouquet, Elizabeth and Pfeuffer, Ken and Alt, Florian},
journal = {{Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies}},
title = {{One-Handed Input for Mobile Devices via Motion Matching and Orbits Controls}},
year = {2022},
month = {jul},
note = {esteves2022imwut},
number = {2},
volume = {6},
abstract = {We introduce a novel one-handed input technique for mobile devices that is not based on pointing, but on motion matching, where users select a target by mimicking its unique animation. Our work is motivated by the findings of a survey (N=201) on current mobile use, from which we identify lingering opportunities for one-handed input techniques. We follow by expanding on current motion matching implementations - previously developed in the context of gaze or mid-air input - so these take advantage of the affordances of touch-input devices. We validate the technique by characterizing user performance via a standard selection task (N=24) where we report success rates (>95%), selection times (~1.6 s), input footprint, grip stability, usability, and subjective workload - in both phone and tablet conditions. Finally, we present a design space that illustrates six ways in which motion matching can be embedded into mobile interfaces via a camera prototype application.},
address = {New York, NY, USA},
articleno = {51},
doi = {10.1145/3534624},
issue_date = {July 2022},
keywords = {mobile use survey, reachability, orbits, one-handed input, motion matching},
numpages = {24},
publisher = {Association for Computing Machinery},
timestamp = {2022.10.20},
url = {http://www.florian-alt.org/unibw/wp-content/publications/esteves2022imwut.pdf},
}
S. D. Rodriguez, S. Prange, C. V. Ossenberg, M. Henkel, F. Alt, and K. Marky. PriKey – Investigating Tangible Privacy Control for Smart Home Inhabitants and Visitors. In Proceedings of the 12th Nordic Conference on Human-Computer Interaction (NordiCHI ’22), Association for Computing Machinery, New York, NY, USA, 2022.
[BibTeX] [PDF]
@InProceedings{delgado2022nordichi,
author = {Sarah Delgado Rodriguez AND Sarah Prange AND Christina Vergara Ossenberg AND Markus Henkel AND Florian Alt AND Karola Marky},
booktitle = {{Proceedings of the 12th Nordic Conference on Human-Computer Interaction}},
title = {{PriKey – Investigating Tangible Privacy Control for Smart Home Inhabitants and Visitors}},
year = {2022},
address = {New York, NY, USA},
note = {delgado2022nordichi},
publisher = {Association for Computing Machinery},
series = {NordiCHI '22},
location = {Aarhus, Denmark},
timestamp = {2022.10.19},
url = {http://www.florian-alt.org/unibw/wp-content/publications/delgado2022nordichi.pdf},
}
M. Froehlich, J. A. V. Vermehren, F. Alt, and A. Schmidt. Supporting Interface Experimentation for Blockchain Applications. In Proceedings of the 12th Nordic Conference on Human-Computer Interaction (NordiCHI ’22), Association for Computing Machinery, New York, NY, USA, 2022.
[BibTeX] [PDF]
@InProceedings{froehlich2022nordichiadj,
author = {Michael Froehlich AND Jose Adrian Vega Vermehren AND Florian Alt AND Albrecht Schmidt},
booktitle = {{Proceedings of the 12th Nordic Conference on Human-Computer Interaction}},
title = {{Supporting Interface Experimentation for Blockchain Applications}},
year = {2022},
address = {New York, NY, USA},
note = {froehlich2022nordichiadj},
publisher = {Association for Computing Machinery},
series = {NordiCHI '22},
location = {Aarhus, Denmark},
timestamp = {2022.10.19},
url = {http://www.florian-alt.org/unibw/wp-content/publications/froehlich2022nordichiadj.pdf},
}
L. Goetz, R. Rivu, F. Alt, A. Schmidt, and V. Maekelae. Real-World Methods of Autobiographical Recall in Virtual Reality. In Proceedings of the 12th Nordic Conference on Human-Computer Interaction (NordiCHI ’22), Association for Computing Machinery, New York, NY, USA, 2022. doi:10.1145/3546155.3546704
[BibTeX] [PDF]
@InProceedings{goetz2022nordichi,
author = {Laura Goetz AND Radiah Rivu AND Florian Alt AND Albrecht Schmidt AND Ville Maekelae},
booktitle = {{Proceedings of the 12th Nordic Conference on Human-Computer Interaction}},
title = {{Real-World Methods of Autobiographical Recall in Virtual Reality}},
year = {2022},
address = {New York, NY, USA},
note = {goetz2022nordichi},
publisher = {Association for Computing Machinery},
series = {NordiCHI '22},
doi = {10.1145/3546155.3546704},
location = {Aarhus, Denmark},
timestamp = {2022.10.19},
url = {http://www.florian-alt.org/unibw/wp-content/publications/goetz2022nordichi.pdf},
}
C. Sudar, M. Froehlich, and F. Alt. TruEyes: utilizing microtasks in mobile apps for crowdsourced labeling of machine learning datasets. arXiv, 2022. doi:10.48550/ARXIV.2209.14708
[BibTeX] [PDF]
@Misc{sudar2022arxiv,
author = {Sudar, Chandramohan and Froehlich, Michael and Alt, Florian},
howpublished = {arXiv.org},
note = {sudar2022arxiv},
title = {TruEyes: Utilizing Microtasks in Mobile Apps for Crowdsourced Labeling of Machine Learning Datasets},
year = {2022},
copyright = {arXiv.org perpetual, non-exclusive license},
doi = {10.48550/ARXIV.2209.14708},
keywords = {Human-Computer Interaction (cs.HC), Artificial Intelligence (cs.AI), Software Engineering (cs.SE), FOS: Computer and information sciences},
publisher = {arXiv},
timestamp = {2022.09.30},
url = {http://www.florian-alt.org/unibw/wp-content/publications/surdar2022arxiv.pdf},
}
Y. Ma, Y. Abdelrahman, B. Petz, H. Drewes, F. Alt, H. Hussmann, and A. Butz. Enthusiasts, Pragmatists, and Sceptics: Investigating Users’ Attitudes Towards Emotion- and Personality-aware Voice Assistants across Cultures. In Proceedings of the Conference on Mensch Und Computer (MuC ’22), Association for Computing Machinery, New York, NY, USA, 2022. doi:10.1145/3543758.3543776
[BibTeX] [PDF]
@InProceedings{ma2022muc,
author = {Yong Ma AND Yomna Abdelrahman AND Barbarella Petz AND Heiko Drewes AND Florian Alt AND Heinrich Hussmann AND Andreas Butz},
booktitle = {{Proceedings of the Conference on Mensch Und Computer}},
title = {{Enthusiasts, Pragmatists, and Sceptics: Investigating Users’ Attitudes Towards Emotion- and Personality-aware Voice Assistants across Cultures}},
year = {2022},
address = {New York, NY, USA},
note = {ma2022muc},
publisher = {Association for Computing Machinery},
series = {MuC '22},
doi = {10.1145/3543758.3543776},
isbn = {978-1-4503-9690-5/22/09},
location = {Darmstadt, Germany},
timestamp = {2022.09.06},
url = {http://www.florian-alt.org/unibw/wp-content/publications/ma2022muc.pdf},
}
A. Renz, M. Baldauf, E. Maier, and F. Alt. Alexa, It’s Me! An Online Survey on the User Experience of Smart Speaker Authentication. In Proceedings of the Conference on Mensch Und Computer (MuC ’22), Association for Computing Machinery, New York, NY, USA, 2022. doi:10.1145/3543758.3543765
[BibTeX] [PDF]
@InProceedings{renz2022muc,
author = {Andreas Renz AND Matthias Baldauf AND Edith Maier AND Florian Alt},
booktitle = {{Proceedings of the Conference on Mensch Und Computer}},
title = {{Alexa, It’s Me! An Online Survey on the User Experience of Smart Speaker Authentication}},
year = {2022},
address = {New York, NY, USA},
note = {renz2022muc},
publisher = {Association for Computing Machinery},
series = {MuC '22},
doi = {10.1145/3543758.3543765},
isbn = {978-1-4503-9690-5/22/09},
location = {Darmstadt, Germany},
timestamp = {2022.09.06},
url = {http://www.florian-alt.org/unibw/wp-content/publications/renz2022muc.pdf},
}
M. Froehlich, F. Waltenberger, L. Trotter, F. Alt, and A. Schmidt. Blockchain and cryptocurrency in human computer interaction: a systematic literature review and research agenda. In Designing interactive systems conference (DIS ’22), Association for Computing Machinery, New York, NY, USA, 2022, p. 155–177. doi:10.1145/3532106.3533478
[BibTeX] [Abstract] [PDF]
We present a systematic literature review of cryptocurrency and blockchain research in Human-Computer Interaction (HCI) published between 2014 and 2021. We aim to provide an overview of the field, consolidate existing knowledge, and chart paths for future research. Our analysis of 99 articles identifies six major themes: (1) the role of trust, (2) understanding motivation, risk, and perception of cryptocurrencies, (3) cryptocurrency wallets, (4) engaging users with blockchain, (5) using blockchain for application-specific use cases, and (6) support tools for blockchain. We discuss the focus of the existing research body and juxtapose it to the changing landscape of emerging blockchain technologies to highlight future research avenues for HCI and interaction design. With this review, we identify key aspects where interaction design is critical for the adoption of blockchain systems. Doing so, we provide a starting point for new scholars and designers and help them position future contributions.
@InProceedings{froehlich2022dis,
author = {Froehlich, Michael and Waltenberger, Franz and Trotter, Ludwig and Alt, Florian and Schmidt, Albrecht},
booktitle = {Designing Interactive Systems Conference},
title = {Blockchain and Cryptocurrency in Human Computer Interaction: A Systematic Literature Review and Research Agenda},
year = {2022},
address = {New York, NY, USA},
note = {froehlich2022dis},
pages = {155–177},
publisher = {Association for Computing Machinery},
series = {DIS '22},
abstract = {We present a systematic literature review of cryptocurrency and blockchain research in Human-Computer Interaction (HCI) published between 2014 and 2021. We aim to provide an overview of the field, consolidate existing knowledge, and chart paths for future research. Our analysis of 99 articles identifies six major themes: (1) the role of trust, (2) understanding motivation, risk, and perception of cryptocurrencies, (3) cryptocurrency wallets, (4) engaging users with blockchain, (5) using blockchain for application-specific use cases, and (6) support tools for blockchain. We discuss the focus of the existing research body and juxtapose it to the changing landscape of emerging blockchain technologies to highlight future research avenues for HCI and interaction design. With this review, we identify key aspects where interaction design is critical for the adoption of blockchain systems. Doing so, we provide a starting point for new scholars and designers and help them position future contributions.},
doi = {10.1145/3532106.3533478},
isbn = {9781450393584},
keywords = {web3, trust, cryptocurrency, systematic literature review, blockchain, human computer interaction, hci, dlt, distributed ledger, dapps},
location = {Virtual Event, Australia},
numpages = {23},
timestamp = {2022.06.20},
url = {http://florian-alt.org/unibw/wp-content/publications/froehlich2022dis.pdf},
}
K. Guzij, M. Froehlich, F. Fincke, A. Schmidt, and F. Alt. Designing trustworthy user interfaces for the voluntary carbon market: a randomized online experiment. In Designing interactive systems conference (DIS ’22), Association for Computing Machinery, New York, NY, USA, 2022, p. 71–84. doi:10.1145/3532106.3533462
[BibTeX] [Abstract] [PDF]
The voluntary carbon market is an important building block in the fight against climate change. However, it is not trivial for consumers to verify whether carbon offset projects deliver what they promise. While technical solutions for measuring their impact are emerging, there is a lack of understanding of how to translate carbon offset data into trustworthy interface designs. With interaction between users and offset projects mainly happening online, it is critical to meet this design challenge. To this end, we designed and evaluated interfaces with varying trust cues for carbon offset projects in a randomized online experiment (n=244). Our results show that content design, particularly financial and forest-related quantitative data presented at the right detail level, increases the perceived trustworthiness, while images have no significant effect. We contribute the first specific guidance for interface designers for carbon offsets and discuss implications for interaction design.
@InProceedings{guzij2022dis,
author = {Guzij, Klaudia and Froehlich, Michael and Fincke, Florian and Schmidt, Albrecht and Alt, Florian},
booktitle = {Designing Interactive Systems Conference},
title = {Designing Trustworthy User Interfaces for the Voluntary Carbon Market: A Randomized Online Experiment},
year = {2022},
address = {New York, NY, USA},
note = {guzij2022dis},
pages = {71–84},
publisher = {Association for Computing Machinery},
series = {DIS '22},
abstract = {The voluntary carbon market is an important building block in the fight against climate change. However, it is not trivial for consumers to verify whether carbon offset projects deliver what they promise. While technical solutions for measuring their impact are emerging, there is a lack of understanding of how to translate carbon offset data into trustworthy interface designs. With interaction between users and offset projects mainly happening online, it is critical to meet this design challenge. To this end, we designed and evaluated interfaces with varying trust cues for carbon offset projects in a randomized online experiment (n=244). Our results show that content design, particularly financial and forest-related quantitative data presented at the right detail level, increases the perceived trustworthiness, while images have no significant effect. We contribute the first specific guidance for interface designers for carbon offsets and discuss implications for interaction design.},
doi = {10.1145/3532106.3533462},
isbn = {9781450393584},
keywords = {experiment, hci, carbon markets, charitable giving, trust, trustworthy interfaces},
location = {Virtual Event, Australia},
numpages = {14},
timestamp = {2022.06.20},
url = {http://florian-alt.org/unibw/wp-content/publications/guzij2022dis.pdf},
}
M. Froehlich, J. Vega, A. Pahl, S. Lotz, F. Alt, A. Schmidt, and I. Welpe. Prototyping With Blockchain: A Case Study For Teaching Blockchain Application Development at University. In Proceedings of the 25th international conference on interactive collaborative learning and 51st international conference on engineering pedagogy (ICL ’22), 2022.
[BibTeX] [PDF]
@InProceedings{froehlich2022icl,
author = {Froehlich, Michael AND Vega, Jose AND Pahl, Amelie AND Lotz, Sergej AND Alt, Florian AND Schmidt, Albrecht AND Welpe, Isabell},
booktitle = {Proceedings of the 25th International Conference on Interactive Collaborative Learning and 51st International Conference on Engineering Pedagogy},
title = {{Prototyping With Blockchain: A Case Study For Teaching Blockchain Application Development at University}},
year = {2022},
note = {froehlich2022icl},
series = {ICL '22},
timestamp = {2022.06.19},
url = {http://florian-alt.org/unibw/wp-content/publications/froehlich2022icl.pdf},
}
K. Reiter, K. Pfeuffer, A. Esteves, T. Mittermeier, and F. Alt. Look & Turn: One-Handed and Expressive Menu Interaction by Gaze and Arm Turns in VR. In Proceedings of the 2022 ACM Symposium on Eye Tracking Research & Applications (ETRA ’22), Association for Computing Machinery, New York, NY, USA, 2022. doi:10.1145/3517031.3529233
[BibTeX] [Abstract] [PDF]
A user’s free hands provide an intuitive platform to position and design virtual menu interfaces. We explore how the hands and eyes can be integrated in the design of hand-attached menus. We synthesise past work from the literature and derive a design space that crosses properties of menu systems with a hand and eye input vocabulary. From this, we devise three menu systems that are based on the novel concept of Look & Turn: gaze indicates menu selection, and rotational turn of the wrist navigates menu and manipulates continuous parameters. Each technique allows users to interact with the hand-attached menu using the same hand, while keeping the other hand free for drawing. Based on a VR prototype that combines eye-tracking and glove-based finger tracking, we discuss first insights on technical and human factors of the promising interaction concept.
@InProceedings{reiter2022etra,
author = {Reiter, Katharina and Pfeuffer, Ken and Esteves, Augusto and Mittermeier, Tim and Alt, Florian},
booktitle = {{Proceedings of the 2022 ACM Symposium on Eye Tracking Research \& Applications}},
title = {{Look \& Turn: One-Handed and Expressive Menu Interaction by Gaze and Arm Turns in VR}},
year = {2022},
address = {New York, NY, USA},
note = {reiter2022etra},
publisher = {Association for Computing Machinery},
series = {ETRA '22},
abstract = {A user’s free hands provide an intuitive platform to position and design virtual menu interfaces. We explore how the hands and eyes can be integrated in the design of hand-attached menus. We synthesise past work from the literature and derive a design space that crosses properties of menu systems with a hand and eye input vocabulary. From this, we devise three menu systems that are based on the novel concept of Look \& Turn: gaze indicates menu selection, and rotational turn of the wrist navigates menu and manipulates continuous parameters. Each technique allows users to interact with the hand-attached menu using the same hand, while keeping the other hand free for drawing. Based on a VR prototype that combines eye-tracking and glove-based finger tracking, we discuss first insights on technical and human factors of the promising interaction concept.},
articleno = {66},
doi = {10.1145/3517031.3529233},
isbn = {9781450392525},
keywords = {gaze interaction, on-body menus, VR, virtual reality, extended reality, Multimodal interaction, handheld menus, XR},
location = {Seattle, WA, USA},
numpages = {7},
timestamp = {2022.06.09},
url = {http://www.florian-alt.org/unibw/wp-content/publications/reiter2022etra.pdf},
}
Y. Abdrabou, S. R. Rivu, T. Ammar, J. Liebers, A. Saad, C. Liebers, U. Gruenefeld, P. Knierim, M. Khamis, V. Makela, S. Schneegass, and F. Alt. Understanding shoulder surfer behavior and attack patterns using virtual reality. In Adjunct proceedings of the eighteenth symposium on usable privacy and security (SOUPS ’22), USENIX Association, 2022.
[BibTeX] [PDF]
@InProceedings{abdrabou2022soupsadj1,
author = {Abdrabou, Yasmeen and Rivu, Sheikh Radiah and Ammar, Tarek and Liebers, Jonathan and Saad, Alia and Liebers, Carina and Gruenefeld, Uwe and Knierim, Pascal and Khamis, Mohamed and Makela, Ville and Schneegass, Stefan and Alt, Florian},
booktitle = {Adjunct Proceedings of the Eighteenth Symposium on Usable Privacy and Security},
title = {Understanding Shoulder Surfer Behavior and Attack Patterns Using Virtual Reality},
year = {2022},
note = {abdrabou2022soupsadj1},
publisher = {USENIX Association},
series = {SOUPS '22},
timestamp = {2022.06.05},
url = {http://florian-alt.org/unibw/wp-content/publications/abdrabou2022soupsadj1.pdf},
}
Y. Abdrabou, J. Schütte, A. Shams, K. Pfeuffer, D. Buschek, M. Khamis, and F. Alt. Identifying Password Reuse from Gaze Behavior and Keystroke Dynamics. In Adjunct proceedings of the eighteenth symposium on usable privacy and security (SOUPS ’22), USENIX Association, 2022.
[BibTeX] [PDF]
@InProceedings{abdrabou2022soupsadj2,
author = {Yasmeen Abdrabou AND Johannes Sch\"{u}tte AND Ahmed Shams AND Ken Pfeuffer AND Daniel Buschek AND Mohamed Khamis AND Florian Alt},
booktitle = {Adjunct Proceedings of the Eighteenth Symposium on Usable Privacy and Security},
title = {{Identifying Password Reuse from Gaze Behavior and Keystroke Dynamics}},
year = {2022},
note = {abdrabou2022soupsadj2},
publisher = {USENIX Association},
series = {SOUPS '22},
timestamp = {2022.06.05},
url = {http://florian-alt.org/unibw/wp-content/publications/abdrabou2022soupsadj2.pdf},
}
S. D. Rodriguez, L. Mecke, and F. Alt. Sensehandle: investigating human-door interaction behaviour for authentication in the physical world. In Adjunct proceedings of the eighteenth symposium on usable privacy and security (SOUPS ’22), USENIX Association, 2022.
[BibTeX] [PDF]
@InProceedings{delgado2022soupsadj,
author = {Sarah Delgado Rodriguez AND Lukas Mecke AND Florian Alt},
booktitle = {Adjunct Proceedings of the Eighteenth Symposium on Usable Privacy and Security},
title = {SenseHandle: Investigating Human-Door Interaction Behaviour for Authentication in the Physical World},
year = {2022},
note = {delgado2022soupsadj},
publisher = {USENIX Association},
series = {SOUPS '22},
timestamp = {2022.06.05},
url = {http://florian-alt.org/unibw/wp-content/publications/delgado2022soupsadj.pdf},
}
S. Prange, A. Shams, R. Piening, Y. Abdelrahman, and F. Alt. PriView – Exploring Visualisations Supporting Users’ Privacy Awareness. In Adjunct proceedings of the eighteenth symposium on usable privacy and security (SOUPS ’22), USENIX Association, 2022.
[BibTeX] [PDF]
@InProceedings{prange2022soupsadj,
author = {Sarah Prange AND Ahmed Shams AND Robin Piening AND Yomna Abdelrahman AND Florian Alt},
booktitle = {Adjunct Proceedings of the Eighteenth Symposium on Usable Privacy and Security},
title = {{PriView -- Exploring Visualisations Supporting Users' Privacy Awareness}},
year = {2022},
note = {prange2022soupsadj},
publisher = {USENIX Association},
series = {SOUPS '22},
timestamp = {2022.06.05},
url = {http://florian-alt.org/unibw/wp-content/publications/prange2022soupsadj.pdf},
}
Y. Abdrabou, J. Schütte, A. Shams, K. Pfeuffer, D. Buschek, M. Khamis, and F. Alt. “Your Eyes Say You Have Used This Password Before”: Identifying Password Reuse from Gaze Behavior and Keystroke Dynamics. In Proceedings of the 2022 CHI Conference on Human Factors in Computing Systems (CHI ’22), Association for Computing Machinery, New York, NY, USA, 2022.
[BibTeX] [Abstract] [PDF] [Video]
A significant drawback of text passwords for end-user authentication is password reuse. We propose a novel approach to detect password reuse by leveraging gaze as well as typing behavior and study its accuracy. We collected gaze and typing behavior from 49 users while creating accounts for 1) a webmail client and 2) a news website. While most participants came up with a new password, 32% reported having reused an old password when setting up their accounts. We then compared different ML models to detect password reuse from the collected data. Our models achieve an accuracy of up to 87.7% in detecting password reuse from gaze, 75.8% accuracy from typing, and 88.75% when considering both types of behavior. We demonstrate that, using gaze, password reuse can already be detected during the registration process, before users enter their password. Our work paves the way for developing novel interventions to prevent password reuse.
@InProceedings{abdrabou2022chi,
author = {Yasmeen Abdrabou AND Johannes Sch\"{u}tte AND Ahmed Shams AND Ken Pfeuffer AND Daniel Buschek AND Mohamed Khamis AND Florian Alt},
booktitle = {{Proceedings of the 2022 CHI Conference on Human Factors in Computing Systems}},
title = {{"Your Eyes Say You Have Used This Password Before": Identifying Password Reuse from Gaze Behavior and Keystroke Dynamics}},
year = {2022},
address = {New York, NY, USA},
note = {abdrabou2022chi},
publisher = {Association for Computing Machinery},
series = {CHI ’22},
abstract = {A significant drawback of text passwords for end-user authentication is password reuse. We propose a novel approach to detect password reuse by leveraging gaze as well as typing behavior and study its accuracy. We collected gaze and typing behavior from 49 users while creating accounts for 1) a webmail client and 2) a news website. While most participants came up with a new password, 32% reported having reused an old password when setting up their accounts. We then compared different ML models to detect password reuse from the collected data. Our models achieve an accuracy of up to 87.7% in detecting password reuse from gaze, 75.8% accuracy from typing, and 88.75% when considering both types of behavior. We demonstrate that, using gaze, password reuse can already be detected during the registration process, before users enter their password. Our work paves the way for developing novel interventions to prevent password reuse.},
isbn = {9781450367080},
location = {New Orleans, LA, USA},
timestamp = {2022.05.03},
url = {http://florian-alt.org/unibw/wp-content/publications/abdrabou2022chi.pdf},
video = {abdrabou2022chi},
}
V. Mäekelä, J. Winter, J. Schwab, M. Koch, and F. Alt. Pandemic Displays: Considering Hygiene on Public Touchscreens in the Post-Pandemic Era. In Proceedings of the 2022 CHI Conference on Human Factors in Computing Systems (CHI ’22), Association for Computing Machinery, New York, NY, USA, 2022. doi:10.1145/3491102.3501937
[BibTeX] [Abstract] [PDF] [Video]
In this paper, we investigate hygiene measures on public touchscreens. The COVID-19 pandemic created unprecedented questions for touch-based public displays regarding hygiene, risks, and general awareness. We study A) how COVID-19 has affected the perceptions of shared touchscreens and B) how touchscreens could be improved through hygiene-related functions. First, we report the results from an online survey (n = 286). Second, we present a hygiene concept for touchscreens that visualizes prior touches and provides information about the cleaning of the display and number of prior users. Third, we report the feedback for our hygiene concept from 77 participants. We find that there is demand for improved awareness of public displays’ hygiene status, especially among those with stronger concerns about COVID-19. A particularly desired detail is when the display has been cleaned. For visualizing prior touches, fingerprints worked the best. We present further considerations for designing for hygiene on public displays.
@InProceedings{maekelae2022chi,
author = {Ville M\"{a}ekel\"{a} AND Jonas Winter AND Jasmin Schwab AND Michael Koch AND Florian Alt},
booktitle = {{Proceedings of the 2022 CHI Conference on Human Factors in Computing Systems}},
title = {{Pandemic Displays: Considering Hygiene on Public Touchscreens in the Post-Pandemic Era}},
year = {2022},
address = {New York, NY, USA},
note = {maekelae2022chi},
publisher = {Association for Computing Machinery},
series = {CHI ’22},
abstract = {In this paper, we investigate hygiene measures on public touchscreens. The COVID-19 pandemic created unprecedented questions for touch-based public displays regarding hygiene, risks, and general awareness. We study A) how COVID-19 has affected the perceptions of shared touchscreens and B) how touchscreens could be improved through hygiene-related functions. First, we report the results from an online survey (n = 286). Second, we present a hygiene concept for touchscreens that visualizes prior touches and provides information about the cleaning of the display and number of prior users. Third, we report the feedback for our hygiene concept from 77 participants. We find that there is demand for improved awareness of public displays' hygiene status, especially among those with stronger concerns about COVID-19. A particularly desired detail is when the display has been cleaned. For visualizing prior touches, fingerprints worked the best. We present further considerations for designing for hygiene on public displays.},
doi = {10.1145/3491102.3501937},
isbn = {9781450367080},
location = {New Orleans, LA, USA},
timestamp = {2022.05.03},
url = {http://florian-alt.org/unibw/wp-content/publications/maekelae2022chi.pdf},
video = {maekelae2022chi},
}
S. Prange, S. D. Rodriguez, T. Döding, and F. Alt. “Where did you first meet the owner?” – Exploring Usable Authentication for Smart Home Visitors. In Extended Abstracts of the 2022 CHI Conference on Human Factors in Computing Systems (CHI EA ’22), Association for Computing Machinery, New York, NY, USA, 2022. doi:10.1145/3491101.3519777
[BibTeX] [PDF] [Video] [Talk]
@InProceedings{prange2022chiea,
author = {Sarah Prange AND Sarah Delgado Rodriguez AND Timo D\"{o}ding AND Florian Alt},
booktitle = {{Extended Abstracts of the 2022 CHI Conference on Human Factors in Computing Systems}},
title = {{"Where did you first meet the owner?'' -- Exploring Usable Authentication for Smart Home Visitors}},
year = {2022},
address = {New York, NY, USA},
note = {prange2022chiea},
publisher = {Association for Computing Machinery},
series = {CHI EA ’22},
doi = {10.1145/3491101.3519777},
isbn = {9781450367080},
location = {New Orleans, LA, USA},
poster = {prange2022chiea},
talk = {prange2022chiea},
timestamp = {2022.05.02},
url = {http://florian-alt.org/unibw/wp-content/publications/prange2022chiea.pdf},
video = {prange2022chiea},
}
V. Volk, S. Prange, and F. Alt. PriCheck – An Online Privacy Assistant for Smart Device Purchases. In Extended Abstracts of the 2022 CHI Conference on Human Factors in Computing Systems (CHI EA ’22), Association for Computing Machinery, New York, NY, USA, 2022. doi:10.1145/3491101.3519827
[BibTeX] [Abstract] [PDF] [Video] [Talk]
In this paper, we present PriCheck, a browser extension that provides privacy-relevant information about smart devices (e.g., in an online shop). This information is oftentimes hidden, difficult to access, and, thus, often neglected when buying a new device. With PriCheck, we enable users to make informed purchase decisions. We conducted an exploratory study using the browser extension in a simplified (mock) online shop for smart devices. Participants chose devices with and without using the extension. We found that participants (N = 11) appreciated the usability and available information of PriCheck, helping them with informed decisions for privacy-preserving products. We hope our work will stimulate further discussion on how to make privacy information for novel products available, understandable, and easy to access for users.
@InProceedings{volk2022chiea,
author = {Vera Volk AND Sarah Prange AND Florian Alt},
booktitle = {{Extended Abstracts of the 2022 CHI Conference on Human Factors in Computing Systems}},
title = {{PriCheck -- An Online Privacy Assistant for Smart Device Purchases}},
year = {2022},
address = {New York, NY, USA},
note = {volk2022chiea},
publisher = {Association for Computing Machinery},
series = {CHI EA ’22},
abstract = {In this paper, we present PriCheck, a browser extension that provides privacy-relevant information about smart devices (e.g., in an online shop). This information is oftentimes hidden, difficult to access, and, thus, often neglected when buying a new device. With PriCheck, we enable users to make informed purchase decisions. We conducted an exploratory study using the browser extension in a simplified (mock) online shop for smart devices. Participants chose devices with and without using the extension. We found that participants (N = 11) appreciated the usability and available information of PriCheck, helping them with informed decisions for privacy-preserving products. We hope our work will stimulate further discussion on how to make privacy information for novel products available, understandable, and easy to access for users.},
doi = {10.1145/3491101.3519827},
isbn = {9781450367080},
location = {New Orleans, LA, USA},
poster = {volk2022chiea},
talk = {volk2022chiea},
timestamp = {2022.05.02},
url = {http://florian-alt.org/unibw/wp-content/publications/volk2022chiea.pdf},
video = {volk2022chiea},
}
M. Khamis, K. Marky, A. Bulling, and F. Alt. User-centred multimodal authentication: securing handheld mobile devices using gaze and touch input. Behaviour & information technology, pp. 1-23, 2022. doi:10.1080/0144929X.2022.2069597
[BibTeX] [PDF]
@Article{khamis2022bit,
author = {Mohamed Khamis and Karola Marky and Andreas Bulling and Florian Alt},
journal = {Behaviour \& Information Technology},
title = {User-centred multimodal authentication: securing handheld mobile devices using gaze and touch input},
year = {2022},
note = {khamis2022bit},
number = {0},
pages = {1-23},
volume = {0},
doi = {10.1080/0144929X.2022.2069597},
eprint = {https://doi.org/10.1080/0144929X.2022.2069597},
publisher = {Taylor \& Francis},
timestamp = {2022.05.01},
url = {http://florian-alt.org/unibw/wp-content/publications/khamis2022bit.pdf},
}
A. Saad, U. Gruenefeld, L. Mecke, M. Koelle, F. Alt, and S. Schneegass. Mask removal isn’t always convenient in public! – the impact of the covid-19 pandemic on device usage and user authentication. In Extended abstracts of the 2022 chi conference on human factors in computing systems (CHI EA ’22), Association for Computing Machinery, New York, NY, USA, 2022. doi:10.1145/3491101.3519804
[BibTeX] [Abstract] [PDF] [Video] [Talk]
The ongoing Covid-19 pandemic has impacted our everyday lives and requires everyone to take countermeasures such as wearing masks or disinfecting their hands. However, while previous work suggests that these countermeasures may profoundly impact biometric authentication, an investigation of the actual impact is still missing. Hence, in this work, we present our findings from an online survey (n=334) on experienced changes in device usage and failures of authentication. Our results show significant changes in personal and shared device usage, as well as a significant increase in experienced failures when comparing the present situation to before the Covid-19 pandemic. From our qualitative analysis of participants’ responses, we derive potential reasons for these changes in device usage and increases in authentication failures. Our findings suggest that making authentication contactless is only one of the aspects relevant to countering the novel challenges caused by the pandemic.
@InProceedings{saad2022chiea,
author = {Saad, Alia and Gruenefeld, Uwe and Mecke, Lukas and Koelle, Marion and Alt, Florian and Schneegass, Stefan},
booktitle = {Extended Abstracts of the 2022 CHI Conference on Human Factors in Computing Systems},
title = {Mask Removal Isn’t Always Convenient in Public! – The Impact of the Covid-19 Pandemic on Device Usage and User Authentication},
year = {2022},
address = {New York, NY, USA},
note = {saad2022chiea},
publisher = {Association for Computing Machinery},
series = {CHI EA '22},
abstract = {The ongoing Covid-19 pandemic has impacted our everyday lives and requires everyone to take countermeasures such as wearing masks or disinfecting their hands. However, while previous work suggests that these countermeasures may profoundly impact biometric authentication, an investigation of the actual impact is still missing. Hence, in this work, we present our findings from an online survey (n=334) on experienced changes in device usage and failures of authentication. Our results show significant changes in personal and shared device usage, as well as a significant increase in experienced failures when comparing the present situation to before the Covid-19 pandemic. From our qualitative analysis of participants’ responses, we derive potential reasons for these changes in device usage and increases in authentication failures. Our findings suggest that making authentication contactless is only one of the aspects relevant to countering the novel challenges caused by the pandemic.},
articleno = {218},
doi = {10.1145/3491101.3519804},
isbn = {9781450391566},
keywords = {authentication, biometrics, usable security, mobile devices, Covid-19},
location = {New Orleans, LA, USA},
numpages = {7},
poster = {saad2022chiea},
talk = {saad2022chiea},
timestamp = {2022.05.01},
url = {http://florian-alt.org/unibw/wp-content/publications/saad2022chiea.pdf},
video = {saad2022chiea},
}
Y. Abdelrahman, F. Mathis, P. Knierim, A. Kettler, F. Alt, and M. Khamis. Cuevr: studying the usability of cue-based authentication for virtual reality. In Proceedings of the 2022 international conference on advanced visual interfaces (AVI 2022), Association for Computing Machinery, New York, NY, USA, 2022. doi:10.1145/3531073.3531092
[BibTeX] [Abstract] [PDF]
Existing virtual reality (VR) authentication schemes are either slow or prone to observation attacks. We propose CueVR, a cue-based authentication scheme that is resilient against observation attacks by design since vital cues are randomly generated and only visible to the user experiencing the VR environment. We investigate three different input modalities through an in-depth usability study (N=20) and show that while authentication using CueVR is slower than the less secure baseline, it is faster than existing observation resilient cue-based schemes and VR schemes (4.151 s – 7.025 s to enter a 4-digit PIN). Our results also indicate that using the controllers’ trackpad significantly outperforms input using mid-air gestures. We conclude by discussing how visual cues can enhance the security of VR authentication while maintaining high usability. Furthermore, we show how existing real-world authentication schemes combined with VR’s unique characteristics can advance future VR authentication procedures.
@InProceedings{abdelrahman2022avi,
author = {Abdelrahman, Yomna and Mathis, Florian and Knierim, Pascal and Kettler, Axel and Alt, Florian and Khamis, Mohamed},
booktitle = {Proceedings of the 2022 International Conference on Advanced Visual Interfaces},
title = {CueVR: Studying the Usability of Cue-Based Authentication for Virtual Reality},
year = {2022},
address = {New York, NY, USA},
note = {abdelrahman2022avi},
publisher = {Association for Computing Machinery},
series = {AVI 2022},
abstract = {Existing virtual reality (VR) authentication schemes are either slow or prone to observation attacks. We propose CueVR, a cue-based authentication scheme that is resilient against observation attacks by design since vital cues are randomly generated and only visible to the user experiencing the VR environment. We investigate three different input modalities through an in-depth usability study (N=20) and show that while authentication using CueVR is slower than the less secure baseline, it is faster than existing observation resilient cue-based schemes and VR schemes (4.151 s – 7.025 s to enter a 4-digit PIN). Our results also indicate that using the controllers’ trackpad significantly outperforms input using mid-air gestures. We conclude by discussing how visual cues can enhance the security of VR authentication while maintaining high usability. Furthermore, we show how existing real-world authentication schemes combined with VR’s unique characteristics can advance future VR authentication procedures.},
articleno = {34},
doi = {10.1145/3531073.3531092},
isbn = {9781450397193},
keywords = {Authentication, Usable Security, Virtual Reality},
location = {Frascati, Rome, Italy},
numpages = {9},
timestamp = {2022.04.30},
url = {http://www.florian-alt.org/unibw/wp-content/publications/abdelrahman2022avi.pdf},
}
Y. Abdrabou, S. R. Rivu, T. Ammar, J. Liebers, A. Saad, C. Liebers, U. Gruenefeld, P. Knierim, M. Khamis, V. Mäkelä, S. Schneegass, and F. Alt. Understanding Shoulder Surfer Behavior and Attack Patterns Using Virtual Reality. In Proceedings of the 2022 International Conference on Advanced Visual Interfaces (AVI 2022), Association for Computing Machinery, New York, NY, USA, 2022. doi:10.1145/3531073.3531106
[BibTeX] [Abstract] [PDF]
In this work, we explore attacker behavior during shoulder surfing. As such behavior is often opportunistic and difficult to observe in real world settings, we leverage the capabilities of virtual reality (VR). We recruited 24 participants and observed their behavior in two virtual waiting scenarios: at a bus stop and in an open office space. In both scenarios, participants shoulder surfed private screens displaying different types of content. From the results we derive an understanding of factors influencing shoulder surfing behavior, reveal common attack patterns, and sketch a behavioral shoulder surfing model. Our work suggests directions for future research on shoulder surfing and can serve as a basis for creating novel approaches to mitigate shoulder surfing.
@InProceedings{abdrabou2022avi,
author = {Abdrabou, Yasmeen and Rivu, Sheikh Radiah and Ammar, Tarek and Liebers, Jonathan and Saad, Alia and Liebers, Carina and Gruenefeld, Uwe and Knierim, Pascal and Khamis, Mohamed and Mäkelä, Ville and Schneegass, Stefan and Alt, Florian},
booktitle = {Proceedings of the 2022 International Conference on Advanced Visual Interfaces},
title = {Understanding Shoulder Surfer Behavior and Attack Patterns Using Virtual Reality},
year = {2022},
address = {New York, NY, USA},
note = {abdrabou2022avi},
publisher = {Association for Computing Machinery},
series = {AVI 2022},
abstract = {In this work, we explore attacker behavior during shoulder surfing. As such behavior is often opportunistic and difficult to observe in real world settings, we leverage the capabilities of virtual reality (VR). We recruited 24 participants and observed their behavior in two virtual waiting scenarios: at a bus stop and in an open office space. In both scenarios, participants shoulder surfed private screens displaying different types of content. From the results we derive an understanding of factors influencing shoulder surfing behavior, reveal common attack patterns, and sketch a behavioral shoulder surfing model. Our work suggests directions for future research on shoulder surfing and can serve as a basis for creating novel approaches to mitigate shoulder surfing.},
articleno = {15},
doi = {10.1145/3531073.3531106},
isbn = {9781450397193},
keywords = {Eye Tracking, Shoulder Surfing, User Behavior, Virtual Reality},
location = {Frascati, Rome, Italy},
numpages = {9},
timestamp = {2022.04.30},
url = {http://www.florian-alt.org/unibw/wp-content/publications/abdrabou2022avi.pdf},
}
S. Prange, N. Thiem, M. Fröhlich, and F. Alt. “Secure Settings Are Quick and Easy!” – Motivating End-Users to Choose Secure Smart Home Configurations. In Proceedings of the 2022 International Conference on Advanced Visual Interfaces (AVI 2022), Association for Computing Machinery, New York, NY, USA, 2022. doi:10.1145/3531073.3531089
[BibTeX] [Abstract] [PDF]
While offering many useful features, novel smart home devices also provide an attack surface to users’ allegedly secure place: their homes. Thus, it is essential to employ effective threat mitigation strategies, such as securely configuring devices. We investigate how users can be motivated to do so. To foster secure actions, we designed two types of nudges based on the Protection Motivation Theory (PMT): one with low and one with high level of detail. As such, our nudges particularly target users’ threat appraisal (including perceived severity and likelihood of threats) and self-efficacy to take action. In a randomized online experiment (N = 210), we simulated a smart home setup procedure. Participants chose significantly more secure configurations when being provided with detailed nudges, and indicated higher perceived threat and coping appraisal (i.e., higher protection motivation) after the experiment. Based on our results, we discuss the design and deployment of nudges for (future) smart home setup procedures. Our work can help to a) increase users’ threat awareness in general, and b) motivate users to take actions such as securely configuring their devices.
@InProceedings{prange2022avi,
author = {Prange, Sarah and Thiem, Niklas and Fr\"{o}hlich, Michael and Alt, Florian},
booktitle = {Proceedings of the 2022 International Conference on Advanced Visual Interfaces},
title = {“Secure Settings Are Quick and Easy!” – Motivating End-Users to Choose Secure Smart Home Configurations},
year = {2022},
address = {New York, NY, USA},
note = {prange2022avi},
publisher = {Association for Computing Machinery},
series = {AVI 2022},
abstract = {While offering many useful features, novel smart home devices also provide an attack surface to users’ allegedly secure place: their homes. Thus, it is essential to employ effective threat mitigation strategies, such as securely configuring devices. We investigate how users can be motivated to do so. To foster secure actions, we designed two types of nudges based on the Protection Motivation Theory (PMT): one with low and one with high level of detail. As such, our nudges particularly target users’ threat appraisal (including perceived severity and likelihood of threats) and self-efficacy to take action. In a randomized online experiment (N = 210), we simulated a smart home setup procedure. Participants chose significantly more secure configurations when being provided with detailed nudges, and indicated higher perceived threat and coping appraisal (i.e., higher protection motivation) after the experiment. Based on our results, we discuss the design and deployment of nudges for (future) smart home setup procedures. Our work can help to a) increase users’ threat awareness in general, and b) motivate users to take actions such as securely configuring their devices.},
articleno = {20},
doi = {10.1145/3531073.3531089},
isbn = {9781450397193},
keywords = {Smart Home, Protection Motivation Theory, Usable Security},
location = {Frascati, Rome, Italy},
numpages = {9},
timestamp = {2022.04.30},
url = {http://www.florian-alt.org/unibw/wp-content/publications/prange2022avi.pdf},
}
F. Alt. Wie die Forschung auf das Metaversum blickt. Inside.unibw, vol. 9, p. 3, 2022.
[BibTeX] [PDF]
@Article{alt2022insideunibw,
author = {Florian Alt},
journal = {inside.unibw},
title = {{Wie die Forschung auf das Metaversum blickt}},
year = {2022},
month = may,
note = {alt2022insideunibw},
pages = {3},
volume = {9},
timestamp = {2022.04.28},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2022insideunibw.pdf},
}
P. A. Rauschnabel, R. Felix, C. Hinsch, H. Shahab, and F. Alt. What is XR? Towards a Framework for Augmented and Virtual Reality. Computers in Human Behavior, vol. 133, p. 107289, 2022. doi:10.1016/j.chb.2022.107289
[BibTeX] [Abstract] [PDF]
Augmented Reality (AR), Virtual Reality (VR), Mixed Reality, and Extended Reality (often – misleadingly – abbreviated as XR) are commonly used terms to describe how technologies generate or modify reality. However, academics and professionals have been inconsistent in their use of these terms. This has led to conceptual confusion and unclear demarcations. Inspired by prior research and qualitative insights from XR professionals, we discuss the meaning and definitions of various terms and organize them in our proposed framework. As a result, we conclude that (1) XR should not be used to connote extended reality, but as a more open approach where the X implies the unknown variable: xReality; (2) AR and VR have fundamental differences and thus should be treated as different experiences; (3) AR experiences can be described on a continuum ranging from assisted reality to mixed reality (based on the level of local presence); and (4), VR experiences can be conceptualized on a telepresence-continuum ranging from atomistic to holistic VR.
@Article{rauschnabel2022chb,
author = {Philipp A. Rauschnabel and Reto Felix and Chris Hinsch and Hamza Shahab and Florian Alt},
journal = {Computers in Human Behavior},
title = {What is XR? Towards a Framework for Augmented and Virtual Reality},
year = {2022},
issn = {0747-5632},
note = {rauschnabel2022chb},
pages = {107289},
volume = {133},
abstract = {Augmented Reality (AR), Virtual Reality (VR), Mixed Reality, and Extended Reality (often – misleadingly – abbreviated as XR) are commonly used terms to describe how technologies generate or modify reality. However, academics and professionals have been inconsistent in their use of these terms. This has led to conceptual confusion and unclear demarcations. Inspired by prior research and qualitative insights from XR professionals, we discuss the meaning and definitions of various terms and organize them in our proposed framework. As a result, we conclude that (1) XR should not be used to connote extended reality, but as a more open approach where the X implies the unknown variable: xReality; (2) AR and VR have fundamental differences and thus should be treated as different experiences; (3) AR experiences can be described on a continuum ranging from assisted reality to mixed reality (based on the level of local presence); and (4), VR experiences can be conceptualized on a telepresence-continuum ranging from atomistic to holistic VR.},
doi = {10.1016/j.chb.2022.107289},
keywords = {Definition, Augmented reality, Virtual reality, Mixed reality, Assisted reality, Metaverse},
timestamp = {2022.03.31},
url = {http://www.florian-alt.org/unibw/wp-content/publications/rauschnabel2022chb.pdf},
}
F. Alt, V. Kostakos, and N. Oliver. Out-of-the-Lab Pervasive Computing (Editorial). IEEE Pervasive Computing, vol. 21, iss. 1, pp. 7-8, 2022. doi:10.1109/MPRV.2022.3147105
[BibTeX] [PDF]
@Article{alt2022ieeepvc,
author = {Florian Alt AND Vassilis Kostakos AND Nuria Oliver},
journal = {{IEEE Pervasive Computing}},
title = {{Out-of-the-Lab Pervasive Computing (Editorial)}},
year = {2022},
note = {alt2022ieeepvc},
number = {1},
pages = {7-8},
volume = {21},
doi = {10.1109/MPRV.2022.3147105},
keywords = {Special issues and sections;COVID-19;Remote laboratories;Smart devices;Research and development;Distance learning;Crowdsensing;Collaboration},
timestamp = {2022.01.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2022ieeepvc.pdf},
}
L. Sahoo, N. S. Miazi, M. Shehab, F. Alt, and Y. Abdelrahman. You Know Too Much: Investigating Users’ Perceptions and Privacy Concerns Towards Thermal Imaging. In Proceedings of the 2022 Privacy Symposium (Privacy’22), 2022. doi:10.1007/978-3-031-09901-4_11
[BibTeX] [PDF]
@InProceedings{sahoo2022privacy,
author = {Lipsarani Sahoo AND Nazmus Sakib Miazi AND Mohamed Shehab AND Florian Alt AND Yomna Abdelrahman},
booktitle = {{Proceedings of the 2022 Privacy Symposium}},
title = {{You Know Too Much: Investigating Users’ Perceptions and Privacy Concerns Towards Thermal Imaging}},
year = {2022},
note = {sahoo2022privacy},
series = {Privacy'22},
doi = {10.1007/978-3-031-09901-4_11},
timestamp = {2022.01.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/sahoo2022privacy.pdf},
}
Y. Abdrabou, R. Rivu, T. Ammar, J. Liebers, A. Saad, C. Liebers, U. Gruenefeld, P. Knierim, M. Khamis, V. Mäkelä, S. Schneegass, and F. Alt. Understanding Shoulder Surfer Behavior Using Virtual Reality. In Adjunct Proceedings of the IEEE Conference on Virtual Reality and 3D User Interfaces, 2022.
[BibTeX] [PDF]
@InProceedings{abdrabou2022ieeevr,
author = {Yasmeen Abdrabou AND Radiah Rivu AND Tarek Ammar AND Jonathan Liebers AND Alia Saad AND Carina Liebers AND Uwe Gruenefeld AND Pascal Knierim AND Mohamed Khamis AND Ville Mäkelä AND Stefan Schneegass AND Florian Alt},
booktitle = {{Adjunct Proceedings of the IEEE Conference on Virtual Reality and 3D User Interfaces}},
title = {{Understanding Shoulder Surfer Behavior Using Virtual Reality}},
year = {2022},
note = {abdrabou2022ieeevr},
timestamp = {2022.01.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/abdrabou2022ieeevr.pdf},
}
M. Khamis, K. Marky, A. Bulling, and F. Alt. User-centred multimodal authentication: securing handheld mobile devices using gaze and touch input. Behaviour & Information Technology, vol. 41, iss. 10, pp. 2061-2083, 2022. doi:10.1080/0144929X.2022.2069597
[BibTeX] [PDF]
@Article{khamis2022bit,
author = {Mohamed Khamis and Karola Marky and Andreas Bulling and Florian Alt},
journal = {Behaviour \& Information Technology},
title = {User-centred multimodal authentication: securing handheld mobile devices using gaze and touch input},
year = {2022},
note = {khamis2022bit},
number = {10},
pages = {2061-2083},
volume = {41},
doi = {10.1080/0144929X.2022.2069597},
eprint = {https://doi.org/10.1080/0144929X.2022.2069597},
publisher = {Taylor \& Francis},
timestamp = {2022.12.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2022bit.pdf},
}
P. Knierim, S. Prange, S. Feger, S. Schneegaß, A. Sasse, D. Bayerl, H. Hof, and F. Alt. Inclusive Security by Design. In Mensch und Computer 2022 – Workshopband (MuC ’22), Gesellschaft für Informatik e.V., Bonn, 2022. doi:10.18420/muc2022-mci-ws14-128
[BibTeX] [PDF]
@InProceedings{knierim2022mucadj,
author = {Knierim, Pascal and Prange, Sarah and Feger, Sebastian and Schneegaß, Stefan and Sasse, Angela and Bayerl, Dominik and Hof, Hans-Joachim and Alt, Florian},
booktitle = {Mensch und Computer 2022 - Workshopband},
title = {Inclusive Security by Design},
year = {2022},
address = {Bonn},
note = {knierim2022mucadj},
publisher = {Gesellschaft für Informatik e.V.},
series = {MuC '22},
doi = {10.18420/muc2022-mci-ws14-128},
timestamp = {2022.09.01},
url = {http://florian-alt.org/unibw/wp-content/publications/knierim2022mucadj.pdf},
}

2021

F. Alt. Pervasive Security and Privacy—A Brief Reflection on Challenges and Opportunities. IEEE Pervasive Computing, vol. 20, iss. 4, p. 5, 2021. doi:10.1109/MPRV.2021.3110539
[BibTeX] [PDF]
@Article{alt2021ieeepvc,
author = {Florian Alt},
journal = {IEEE Pervasive Computing},
title = {Pervasive Security and Privacy—A Brief Reflection on Challenges and Opportunities},
year = {2021},
issn = {1558-2590},
month = dec,
note = {alt2021ieeepvc},
number = {4},
pages = {5},
volume = {20},
doi = {10.1109/MPRV.2021.3110539},
timestamp = {2021.12.09},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2021ieeepvc.pdf},
}
F. Alt and S. Schneegass. Beyond Passwords—Challenges and Opportunities of Future Authentication. IEEE Security & Privacy, vol. 20, iss. 1, pp. 82-86, 2021. doi:10.1109/MSEC.2021.3127459
[BibTeX] [PDF]
@Article{alt2021ieeesp,
author = {Florian Alt AND Stefan Schneegass},
journal = {{IEEE Security \& Privacy}},
title = {{Beyond Passwords—Challenges and Opportunities of Future Authentication}},
year = {2021},
note = {alt2021ieeesp},
number = {1},
pages = {82-86},
volume = {20},
doi = {10.1109/MSEC.2021.3127459},
timestamp = {2021.12.09},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2021ieeesp.pdf},
}
M. Braun, F. Weber, and F. Alt. Affective Automotive User Interfaces – Reviewing the State of Driver Affect Research and Emotion Regulation in the Car. ACM Computing Surveys, vol. 54, iss. 7, 2021. doi:10.1145/3460938
[BibTeX] [Abstract] [PDF]
Affective technology offers exciting opportunities to improve road safety by catering to human emotions. Modern car interiors enable the contactless detection of user states, paving the way for a systematic promotion of safe driver behavior through emotion regulation. We review the current literature regarding the impact of emotions on driver behavior and analyze the state of emotion regulation approaches in the car. We summarize challenges for affective interaction in form of technological hurdles and methodological considerations, as well as opportunities to improve road safety by reinstating drivers into an emotionally balanced state. The purpose of this review is to outline the community’s combined knowledge for interested researchers, to provide a focussed introduction for practitioners, raise awareness for cultural aspects, and to identify future directions for affective interaction in the car.
@Article{braun2021csur,
author = {Michael Braun AND Florian Weber AND Florian Alt},
journal = {{ACM Computing Surveys}},
title = {{Affective Automotive User Interfaces – Reviewing the State of Driver Affect Research and Emotion Regulation in the Car}},
year = {2021},
issn = {0360-0300},
month = {sep},
note = {braun2021csur},
number = {7},
volume = {54},
abstract = {Affective technology offers exciting opportunities to improve road safety by catering to human emotions. Modern car interiors enable the contactless detection of user states, paving the way for a systematic promotion of safe driver behavior through emotion regulation. We review the current literature regarding the impact of emotions on driver behavior and analyze the state of emotion regulation approaches in the car. We summarize challenges for affective interaction in form of technological hurdles and methodological considerations, as well as opportunities to improve road safety by reinstating drivers into an emotionally balanced state. The purpose of this review is to outline the community’s combined knowledge for interested researchers, to provide a focussed introduction for practitioners, raise awareness for cultural aspects, and to identify future directions for affective interaction in the car.},
address = {New York, NY, USA},
articleno = {137},
doi = {10.1145/3460938},
issue_date = {March 2021},
keywords = {automotive user interfaces, emotion regulation, Affective computing},
numpages = {25},
publisher = {Association for Computing Machinery},
timestamp = {2021.12.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2021csur.pdf},
}
M. Froehlich, P. Hulm, and F. Alt. Under Pressure – A User-Centered Threat Model for Cryptocurrency Owners. In Proceedings of the Fourth International Conference on Blockchain Technology and Applications (ICBTA ’21), 2021.
[BibTeX] [Abstract] [PDF] [Video]
Cryptocurrencies have gained popularity in recent years. However, for many users, keeping ownership of their cryptocurrency is a complex task. News reports frequently bear witness to scams, hacked exchanges, and fortunes beyond retrieval. However, we lack a systematic understanding of user-centered cryptocurrency threats, as causes leading to loss are scattered across publications. To address this gap, we conducted a focus group (n=6) and an expert elicitation study (n=25) following a three-round Delphi process with a heterogeneous group of blockchain and security experts from academia and industry. We contribute the first systematic overview of threats cryptocurrency users are exposed to and propose six overarching categories. Our work is complemented by a discussion on how the human-computer-interaction community can address these threats and how practitioners can use the model to understand situations in which users might find themselves under the pressure of an attack to ultimately engineer more secure systems.
@InProceedings{froehlich2021icbta,
author = {Michael Froehlich AND Philipp Hulm AND Florian Alt},
booktitle = {{Proceedings of the Fourth International Conference on Blockchain Technology and Applications}},
title = {{Under Pressure - A User-Centered Threat Model for Cryptocurrency Owners}},
year = {2021},
note = {froehlich2021icbta},
series = {ICBTA '21},
abstract = {Cryptocurrencies have gained popularity in recent years. However, for many users, keeping ownership of their cryptocurrency is a complex task. News reports frequently bear witness to scams, hacked exchanges, and fortunes beyond retrieval. However, we lack a systematic understanding of user-centered cryptocurrency threats, as causes leading to loss are scattered across publications. To address this gap, we conducted a focus group (n=6) and an expert elicitation study (n=25) following a three-round Delphi process with a heterogeneous group of blockchain and security experts from academia and industry. We contribute the first systematic overview of threats cryptocurrency users are exposed to and propose six overarching categories. Our work is complemented by a discussion on how the human-computer-interaction community can address these threats and how practitioners can use the model to understand situations in which users might find themselves under the pressure of an attack to ultimately engineer more secure systems.},
location = {Virtual},
owner = {florian},
timestamp = {2021.12.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/froehlich2021icbta.pdf},
video = {froehlich2021icbta},
}
R. Rivu, V. Mäkelä, S. Prange, S. D. Rodriguez, R. Piening, Y. Zhou, K. Köhle, K. Pfeuffer, Y. Abdelrahman, M. Hoppe, A. Schmidt, and F. Alt. Remote VR Studies – A Framework for Running Virtual Reality Studies Remotely Via Participant-Owned HMDs. ACM Transactions on Computer-Human Interaction (ToCHI), vol. 28, iss. 6, 2021. doi:10.1145/3472617
[BibTeX] [Abstract] [PDF]
We investigate opportunities and challenges of running virtual reality (VR) studies remotely. Today, many consumers own head-mounted displays (HMDs), allowing them to participate in scientific studies from their homes using their own equipment. Researchers can benefit from this approach by being able to recruit study populations normally out of their reach, and to conduct research at times when it is difficult to get people into the lab (cf. the COVID pandemic). In an initial online survey (N = 227), we assessed HMD owners’ demographics, their VR setups and their attitudes toward remote participation. We then identified different approaches to running remote studies and conducted two case studies for an in-depth understanding. We synthesize our findings into a framework for remote VR studies, discuss strengths and weaknesses of the different approaches, and derive best practices. Our work is valuable for Human-Computer Interaction (HCI) researchers conducting VR studies outside labs.
@Article{rivu2021tochi,
author = {Radiah Rivu and Ville Mäkelä and Sarah Prange and Sarah Delgado Rodriguez and Robin Piening and Yumeng Zhou and Kay Köhle and Ken Pfeuffer and Yomna Abdelrahman and Matthias Hoppe and Albrecht Schmidt and Florian Alt},
journal = {{ACM Transactions on Computer-Human Interaction (ToCHI)}},
title = {{Remote VR Studies -- A Framework for Running Virtual Reality Studies Remotely Via Participant-Owned HMDs}},
year = {2021},
issn = {1073-0516},
month = {nov},
note = {rivu2021tochi},
number = {6},
volume = {28},
abstract = {We investigate opportunities and challenges of running virtual reality (VR) studies remotely. Today, many consumers own head-mounted displays (HMDs), allowing them to participate in scientific studies from their homes using their own equipment. Researchers can benefit from this approach by being able to recruit study populations normally out of their reach, and to conduct research at times when it is difficult to get people into the lab (cf. the COVID pandemic). In an initial online survey (N = 227), we assessed HMD owners’ demographics, their VR setups and their attitudes toward remote participation. We then identified different approaches to running remote studies and conducted two case studies for an in-depth understanding. We synthesize our findings into a framework for remote VR studies, discuss strengths and weaknesses of the different approaches, and derive best practices. Our work is valuable for Human-Computer Interaction (HCI) researchers conducting VR studies outside labs.},
address = {New York, NY, USA},
articleno = {46},
doi = {10.1145/3472617},
numpages = {36},
publisher = {Association for Computing Machinery},
timestamp = {2021.12.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/rivu2021tochi.pdf},
}
A. Nussbaum, J. Schuette, L. Hao, H. Schulzrinne, and F. Alt. Tremble: TRansparent Emission Monitoring with BLockchain Endorsement. In Proceedings of the 2021 IEEE International Conference on Internet of Things (iThings’21), IEEE, 2021. doi:10.1109/iThings-GreenCom-CPSCom-SmartData-Cybermatics53846.2021.00024
[BibTeX] [Abstract] [PDF]
Since the monitoring of environmental emissions is mostly in the hands of regulatory authorities, collected data may not be easily observed by the interested public. Centrally stored data may also tempt the authorities or others to manipulate the historical record for political or liability reasons. To enable timely, transparent and integrity-protected collection and presentation of emission data, we propose and implement Tremble, an emission monitoring system based on blockchain and IoT sensors. Tremble employs a hybrid storage approach to lower the cost of storage compared to using a pure blockchain without losing data integrity. It provides web interfaces and visualizations for end users to query emission values they are concerned about. Qualitative and quantitative studies involving a total of 62 subjects demonstrate the usability of the system.
@InProceedings{nussbaum2021ithings,
author = {Alexander Nussbaum AND Johannes Schuette AND Luoyao Hao AND Henning Schulzrinne AND Florian Alt},
booktitle = {{Proceedings of the 2021 IEEE International Conference on Internet of Things}},
title = {{Tremble: TRansparent Emission Monitoring with BLockchain Endorsement}},
year = {2021},
note = {nussbaum2021ithings},
publisher = {IEEE},
series = {iThings'21},
abstract = {Since the monitoring of environmental emissions is mostly in the hands of regulatory authorities, collected data may not be easily observed by the interested public. Centrally stored data may also tempt the authorities or others to manipulate the historical record for political or liability reasons. To enable timely, transparent and integrity-protected collection and presentation of emission data, we propose and implement Tremble, an emission monitoring system based on blockchain and IoT sensors. Tremble employs a hybrid storage approach to lower the cost of storage compared to using a pure blockchain without losing data integrity. It provides web interfaces and visualizations for end users to query emission values they are concerned about. Qualitative and quantitative studies involving a total of 62 subjects demonstrate the usability of the system.},
doi = {10.1109/iThings-GreenCom-CPSCom-SmartData-Cybermatics53846.2021.00024},
location = {Melbourne, Australia},
timestamp = {2021.11.30},
url = {http://www.florian-alt.org/unibw/wp-content/publications/nussbaum2021ithings.pdf},
}
K. Marky, S. Prange, M. Mühlhäuser, and F. Alt. Roles Matter! Understanding Differences in the Privacy Mental Models of Smart Home Visitors and Residents. In Proceedings of the 20th International Conference on Mobile and Ubiquitous Multimedia (MUM’21), Association for Computing Machinery, New York, NY, USA, 2021. doi:10.1145/3490632.3490664
[BibTeX] [Abstract] [PDF]
In this paper, we contribute an in-depth study of the mental models of various roles in smart home ecosystems. In particular, we compared mental models regarding data collection among residents (primary users) and visitors of a smart home in a qualitative study (N=30) to better understand how bystanders’ specific privacy needs can be addressed. Our results suggest that bystanders have a limited understanding of how smart devices collect and store sensitive data about them. Misconceptions in bystanders’ mental models result in missing awareness and ultimately limit their ability to protect their privacy. We discuss the limitations of existing solutions and challenges for the design of future smart home environments that reflect the privacy concerns of users and bystanders alike, meant to inform the design of future privacy interfaces for IoT devices.
@InProceedings{marky2021mum,
author = {Marky, Karola AND Prange, Sarah AND Mühlhäuser, Max AND Alt, Florian},
booktitle = {{Proceedings of the 20th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{Roles Matter! Understanding Differences in the Privacy Mental Models of Smart Home Visitors and Residents}},
year = {2021},
address = {New York, NY, USA},
note = {marky2021mum},
publisher = {Association for Computing Machinery},
series = {MUM'21},
abstract = {In this paper, we contribute an in-depth study of the mental models of various roles in smart home ecosystems. In particular, we compared mental models regarding data collection among residents (primary users) and visitors of a smart home in a qualitative study (N=30) to better understand how bystanders’ specific privacy needs can be addressed. Our results suggest that bystanders have a limited understanding of how smart devices collect and store sensitive data about them. Misconceptions in bystanders' mental models result in missing awareness and ultimately limit their ability to protect their privacy. We discuss the limitations of existing solutions and challenges for the design of future smart home environments that reflect the privacy concerns of users and bystanders alike, meant to inform the design of future privacy interfaces for IoT devices.},
doi = {10.1145/3490632.3490664},
location = {Leuven, Belgium},
timestamp = {2021.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/marky2021mum.pdf},
}
K. Pfeuffer, A. Dinc, J. Obernolte, R. Rivu, Y. Abdrabou, F. Schelter, Y. Abdelrahman, and F. Alt. Bi-3D: Bi-Manual Pen-and-Touch Interaction for 3D Manipulation on Tablets. In Proceedings of the 34th ACM Symposium on User Interface Software and Technology (UIST ’21), Association for Computing Machinery, New York, NY, USA, 2021. doi:10.1145/3472749.3474741
[BibTeX] [Abstract] [PDF] [Video]
Tablets are attractive for design work anywhere, but 3D manipulations are notoriously difficult. We explore how engaging the stylus and multi-touch in concert can render such tasks easier. We introduce Bi-3D, an interaction concept where touch gestures are combined with 2D pen commands for 3D manipulation. For example, for a fast and intuitive 3D drag & drop technique: the pen drags the object on-screen, and parallel pinch-to-zoom moves it in the third dimension. In this paper, we describe the Bi-3D design space, crossing two-handed input and the degrees-of-freedom (DOF) of 3D manipulation and navigation tasks. We demonstrate sketching and manipulation tools in a prototype 3D design application, where users can fluidly combine 3D operations through alternating and parallel use of the modalities. We evaluate the core technique, bi-manual 3DOF input, against widget and mid-air baselines in an object movement task. We find that Bi-3D is a fast and practical way for multi-dimensional manipulation of graphical objects, promising to facilitate 3D design on stylus and tablet devices.
@InProceedings{pfeuffer2021uist,
author = {Ken Pfeuffer AND Abdullatif Dinc AND Jan Obernolte AND Radiah Rivu AND Yasmeen Abdrabou AND Franziska Schelter AND Yomna Abdelrahman AND Florian Alt},
booktitle = {{Proceedings of the 34th ACM Symposium on User Interface Software and Technology}},
title = {{Bi-3D: Bi-Manual Pen-and-Touch Interaction for 3D Manipulation on Tablets}},
year = {2021},
address = {New York, NY, USA},
note = {pfeuffer2021uist},
publisher = {Association for Computing Machinery},
series = {UIST ’21},
abstract = {Tablets are attractive for design work anywhere, but 3D manipulations are notoriously difficult. We explore how engaging the stylus and multi-touch in concert can render such tasks easier. We introduce Bi-3D, an interaction concept where touch gestures are combined with 2D pen commands for 3D manipulation. For example, for a fast and intuitive 3D drag & drop technique: the pen drags the object on-screen, and parallel pinch-to-zoom moves it in the third dimension. In this paper, we describe the Bi-3D design space, crossing two-handed input and the degrees-of-freedom (DOF) of 3D manipulation and navigation tasks. We demonstrate sketching and manipulation tools in a prototype 3D design application, where users can fluidly combine 3D operations through alternating and parallel use of the modalities. We evaluate the core technique, bi-manual 3DOF input, against widget and mid-air baselines in an object movement task. We find that Bi-3D is a fast and practical way for multi-dimensional manipulation of graphical objects, promising to facilitate 3D design on stylus and tablet devices.},
doi = {10.1145/3472749.3474741},
isbn = {9781450386357},
location = {virtual},
timestamp = {2021.10.10},
url = {http://florian-alt.org/unibw/wp-content/publications/pfeuffer2021uist.pdf},
video = {pfeuffer2021uist},
}
F. Alt, D. Buschek, D. Heuss, and J. Müller. Orbuculum – Predicting When Users Intend To Leave Large Public Displays. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies, vol. 5, iss. 1, 2021. doi:10.1145/3448075
[BibTeX] [Abstract] [PDF]
We present a system, predicting the point in time when users of a public display are about to leave. The ability to react to users’ intention to leave is valuable for researchers and practitioners alike: users can be presented additional content with the goal to maximize interaction times; they can be offered a discount coupon for redemption in a nearby store hence enabling new business models; or feedback can be collected from users right after they have finished interaction without interrupting their task. Our research consists of multiple steps: (1) We identified features that hint at users’ intention to leave from observations and video logs. (2) We implemented a system capable of detecting such features from Microsoft Kinect’s skeleton data and subsequently make a prediction. (3) We trained and deployed a prediction system with a Quiz game that reacts when users are about to leave (N=249), achieving an accuracy of 78%. The majority of users indeed reacted to the presented intervention.
@Article{alt2021imwut,
author = {Florian Alt AND Daniel Buschek AND David Heuss AND J\"{o}rg M\"{u}ller},
journal = {{Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies}},
title = {{Orbuculum -- Predicting When Users Intend To Leave Large Public Displays}},
year = {2021},
note = {alt2021imwut},
number = {1},
volume = {5},
abstract = {We present a system, predicting the point in time when users of a public display are about to leave. The ability to react to users’ intention to leave is valuable for researchers and practitioners alike: users can be presented additional content with the goal to maximize interaction times; they can be offered a discount coupon for redemption in a nearby store hence enabling new business models; or feedback can be collected from users right after they have finished interaction without interrupting their task. Our research consists of multiple steps: (1) We identified features that hint at users’ intention to leave from observations and video logs. (2) We implemented a system capable of detecting such features from Microsoft Kinect’s skeleton data and subsequently make a prediction. (3) We trained and deployed a prediction system with a Quiz game that reacts when users are about to leave (N=249), achieving an accuracy of 78%. The majority of users indeed reacted to the presented intervention.},
address = {New York, NY, USA},
articleno = {47},
doi = {10.1145/3448075},
issue_date = {Mar 2021},
numpages = {24},
publisher = {Association for Computing Machinery},
timestamp = {2021.10.01},
url = {http://florian-alt.org/unibw/wp-content/publications/alt2021imwut.pdf},
}
A. Saad, J. Liebers, U. Gruenefeld, F. Alt, and S. Schneegass. Understanding Bystanders’ Tendency to Shoulder Surf Smartphones Using 360-degree Videos in Virtual Reality. In Proceedings of the 23rd International Conference on Human-Computer Interaction with Mobile Devices and Services (MobileHCI ’21), Association for Computing Machinery, New York, NY, USA, 2021. doi:10.1145/3447526.3472058
[BibTeX] [Abstract] [PDF]
Shoulder surfing is an omnipresent risk for smartphone users. However, investigating these attacks in the wild is difficult because of either privacy concerns, lack of consent, or the fact that asking for consent would influence people’s behavior (e.g., they could try to avoid looking at smartphones). Thus, we propose utilizing 360-degree videos in Virtual Reality (VR), recorded in staged real-life situations on public transport. Despite differences between perceiving videos in VR and experiencing real-world situations, we believe this approach to allow novel insights on observers’ tendency to shoulder surf another person’s phone authentication and interaction to be gained. By conducting a study (N=16), we demonstrate that a better understanding of shoulder surfers’ behavior can be obtained by analyzing gaze data during video watching and comparing it to post-hoc interview responses. On average, participants looked at the phone for about 11% of the time it was visible and could remember half of the applications used.
@InProceedings{saad2021mobilehci,
author = {Alia Saad AND Jonathan Liebers AND Uwe Gruenefeld AND Florian Alt AND Stefan Schneegass},
booktitle = {{Proceedings of the 23rd International Conference on Human-Computer Interaction with Mobile Devices and Services}},
title = {{Understanding Bystanders' Tendency to Shoulder Surf Smartphones Using 360-degree Videos in Virtual Reality}},
year = {2021},
address = {New York, NY, USA},
note = {saad2021mobilehci},
publisher = {Association for Computing Machinery},
series = {MobileHCI '21},
abstract = {Shoulder surfing is an omnipresent risk for smartphone users. However, investigating these attacks in the wild is difficult because of either privacy concerns, lack of consent, or the fact that asking for consent would influence people's behavior (e.g., they could try to avoid looking at smartphones). Thus, we propose utilizing 360-degree videos in Virtual Reality (VR), recorded in staged real-life situations on public transport. Despite differences between perceiving videos in VR and experiencing real-world situations, we believe this approach to allow novel insights on observers' tendency to shoulder surf another person's phone authentication and interaction to be gained. By conducting a study (N=16), we demonstrate that a better understanding of shoulder surfers' behavior can be obtained by analyzing gaze data during video watching and comparing it to post-hoc interview responses. On average, participants looked at the phone for about 11% of the time it was visible and could remember half of the applications used.},
doi = {10.1145/3447526.3472058},
location = {Toulouse, France},
timestamp = {2021.09.20},
url = {http://www.florian-alt.org/unibw/wp-content/publications/saad2021mobilehci.pdf},
}
S. Prange, C. George, and F. Alt. Design Considerations for Usable Authentication in Smart Homes. In Proceedings of the Conference on Mensch und Computer (MuC ’21), Association for Computing Machinery, New York, NY, USA, 2021, pp. 311–324. doi:10.1145/3473856.3473878
[BibTeX] [Abstract] [PDF]
Smart home devices are on the rise. To provide their rich variety of features, they collect, store and process a considerable amount of (potentially sensitive) user data. However, authentication mechanisms on such devices a) have limited usability or b) are non-existing. To close this gap, we investigated, on one hand, users’ perspectives towards potential privacy and security risks as well as how they imagine usable authentication mechanisms in future smart homes. On the other hand, we considered security experts’ perspectives on authentication for smart homes. In particular, we conducted semi-structured interviews (N=20) with potential smart home users using the story completion method and a focus group with security experts (N=10). We found what kind of devices users would choose and why, potential challenges regarding privacy and security, and potential solutions. We discussed and verified these with security experts. We derive and reflect on a set of design implications for usable authentication mechanisms for smart homes and suggest directions for future research. Our work can assist designers and practitioners when implementing appropriate security mechanisms for smart homes.
@InProceedings{prange2021muc,
author = {Sarah Prange AND Ceenu George AND Florian Alt},
booktitle = {{Proceedings of the Conference on Mensch und Computer}},
title = {{Design Considerations for Usable Authentication in Smart Homes}},
year = {2021},
address = {New York, NY, USA},
note = {prange2021muc},
pages = {311–324},
publisher = {Association for Computing Machinery},
series = {MuC '21},
abstract = {Smart home devices are on the rise. To provide their rich variety of features, they collect, store and process a considerable amount of (potentially sensitive) user data. However, authentication mechanisms on such devices a) have limited usability or b) are non-existing. To close this gap, we investigated, on one hand, users’ perspectives towards potential privacy and security risks as well as how they imagine usable authentication mechanisms in future smart homes. On the other hand, we considered security experts’ perspectives on authentication for smart homes. In particular, we conducted semi-structured interviews (N=20) with potential smart home users using the story completion method and a focus group with security experts (N=10). We found what kind of devices users would choose and why, potential challenges regarding privacy and security, and potential solutions. We discussed and verified these with security experts. We derive and reflect on a set of design implications for usable authentication mechanisms for smart homes and suggest directions for future research. Our work can assist designers and practitioners when implementing appropriate security mechanisms for smart homes.},
doi = {10.1145/3473856.3473878},
isbn = {9781450386456},
keywords = {usable security, authentication, smart homes, privacy, thematic analysis, smart devices, story completion},
location = {Ingolstadt, Germany},
numpages = {14},
timestamp = {2021.09.06},
url = {http://www.florian-alt.org/unibw/wp-content/publications/prange2021muc.pdf},
}
R. Rivu, V. Mäkelä, M. Hassib, Y. Abdelrahman, and F. Alt. Exploring how Saliency Affects Attention in Virtual Reality. In Proceedings of the 18th IFIP TC 13 International Conference on Human-Computer Interaction (INTERACT ’21), Springer, Berlin-Heidelberg, Germany, 2021. doi:10.1007/978-3-030-85607-6_10
[BibTeX] [Abstract] [PDF]
In this paper, we investigate how changes in the saliency of the Virtual Environment (VE) affect our visual attention during different tasks. We investigate if – similar to the real-world – users are attracted to the most salient regions in the VE. This knowledge will help researchers design optimal VR environments, purposefully direct the attention of users, and avoid unintentional distractions. We conducted a user study (N=30) where participants performed tasks (video watching, object stacking, visual search, waiting) with two different saliency conditions in the virtual environment. Our findings suggest that while participants notice the differences in saliency, their visual attention is not diverted towards the salient regions when they are performing tasks.
@InProceedings{rivu2021interact3,
author = {Radiah Rivu AND Ville Mäkelä AND Mariam Hassib AND Yomna Abdelrahman AND Florian Alt},
booktitle = {{Proceedings of the 18th IFIP TC 13 International Conference on Human-Computer Interaction}},
title = {{Exploring how Saliency Affects Attention in Virtual Reality}},
year = {2021},
address = {Berlin-Heidelberg, Germany},
month = {4},
note = {rivu2021interact3},
publisher = {Springer},
series = {INTERACT '21},
abstract = {In this paper, we investigate how changes in the saliency of the Virtual Environment (VE) affect our visual attention during different tasks. We investigate if - similar to the real-world - users are attracted to the most salient regions in the VE. This knowledge will help researchers design optimal VR environments, purposefully direct the attention of users, and avoid unintentional distractions. We conducted a user study (N=30) where participants performed tasks (video watching, object stacking, visual search, waiting) with two different saliency conditions in the virtual environment. Our findings suggest that while participants notice the differences in saliency, their visual attention is not diverted towards the salient regions when they are performing tasks.},
day = {1},
doi = {10.1007/978-3-030-85607-6_10},
isbn = {978-3-030-85607-6},
language = {English},
location = {Bari, Italy},
owner = {florian},
timestamp = {2021.09.03},
url = {http://www.florian-alt.org/unibw/wp-content/publications/rivu2021interact3.pdf},
}
R. Piening, K. Pfeuffer, A. Esteves, T. Mittermeier, S. Prange, P. Schroeder, and F. Alt. Gaze-adaptive Information Access in AR: Empirical Study and Field-Deployment. In Proceedings of the 18th IFIP TC 13 International Conference on Human-Computer Interaction (INTERACT ’21), Springer, Berlin-Heidelberg, Germany, 2021. doi:10.1007/978-3-030-85623-6_32
[BibTeX] [PDF]
@InProceedings{piening2021interact,
author = {Robin Piening AND Ken Pfeuffer AND Augusto Esteves AND Tim Mittermeier AND Sarah Prange AND Philippe Schroeder AND Florian Alt},
booktitle = {{Proceedings of the 18th IFIP TC 13 International Conference on Human-Computer Interaction}},
title = {{Gaze-adaptive Information Access in AR: Empirical Study and Field-Deployment}},
year = {2021},
address = {Berlin-Heidelberg, Germany},
month = {4},
note = {piening2021interact},
publisher = {Springer},
series = {INTERACT '21},
day = {1},
doi = {10.1007/978-3-030-85623-6_32},
isbn = {978-3-030-85623-6},
language = {English},
location = {Bari, Italy},
owner = {florian},
timestamp = {2021.09.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/piening2021interact.pdf},
}
R. Rivu, R. Jiang, V. Mäkelä, M. Hassib, and F. Alt. Emotion Elicitation Techniques in Virtual Reality. In Proceedings of the 18th IFIP TC 13 International Conference on Human-Computer Interaction (INTERACT ’21), Springer, Berlin-Heidelberg, Germany, 2021. doi:10.1007/978-3-030-85623-6_8
[BibTeX] [Abstract] [PDF]
In this paper, we explore how state-of-the-art methods of emotion elicitation can be adapted in virtual reality (VR). We envision that emotion research could be conducted in VR for various benefits, such as switching study conditions and settings on the fly and conducting studies using stimuli that are not easily accessible in the real world such as to induce fear. To this end, we conducted a user study (N=39) where we measured how different emotion elicitation methods (audio, video, image, autobiographical memory recall) perform in VR compared to the real world. We found that elicitation methods produce largely comparable results between VR and the real world, but overall participants experience slightly stronger valence and arousal in VR. Emotions faded over time following the same pattern in both worlds. Our findings are beneficial to researchers and practitioners studying emotional user interfaces in VR.
@InProceedings{rivu2021interact1,
author = {Radiah Rivu AND Ruoyu Jiang AND Ville Mäkelä AND Mariam Hassib AND Florian Alt},
booktitle = {{Proceedings of the 18th IFIP TC 13 International Conference on Human-Computer Interaction}},
title = {{Emotion Elicitation Techniques in Virtual Reality}},
year = {2021},
address = {Berlin-Heidelberg, Germany},
month = {4},
note = {rivu2021interact1},
publisher = {Springer},
series = {INTERACT '21},
abstract = {In this paper, we explore how state-of-the-art methods of emotion elicitation can be adapted in virtual reality (VR). We envision that emotion research could be conducted in VR for various benefits, such as switching study conditions and settings on the fly and conducting studies using stimuli that are not easily accessible in the real world such as to induce fear. To this end, we conducted a user study (N=39) where we measured how different emotion elicitation methods (audio, video, image, autobiographical memory recall) perform in VR compared to the real world. We found that elicitation methods produce largely comparable results between VR and the real world, but overall participants experience slightly stronger valence and arousal in VR. Emotions faded over time following the same pattern in both worlds. Our findings are beneficial to researchers and practitioners studying emotional user interfaces in VR.},
day = {1},
doi = {10.1007/978-3-030-85623-6_8},
isbn = {978-3-030-85623-6},
language = {English},
location = {Bari, Italy},
owner = {florian},
timestamp = {2021.09.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/rivu2021interact1.pdf},
}
R. Rivu, Y. Zhou, R. Welsch, V. Mäkelä, and F. Alt. When Friends become Strangers: Understanding the Influence of Avatar Gender On Interpersonal Distance in Virtual Reality. In Proceedings of the 18th IFIP TC 13 International Conference on Human-Computer Interaction (INTERACT ’21), Springer, Berlin-Heidelberg, Germany, 2021. doi:10.1007/978-3-030-85607-6_16
[BibTeX] [Abstract] [PDF]
In this paper, we investigate how mismatches between biological gender and avatar gender affect interpersonal distance (IPD) in virtual reality (VR). An increasing number of VR experiences and online platforms like Rec Room and VRChat allow users to assume other genders through customized avatars. While the effects of acquaintanceship and gender have been studied with regard to proxemic behavior, the effect of changed genders remains largely unexplored. We conducted a user study (N = 40, friends = 20, strangers = 20) where users played a two-player collaborative game in Rec Room using both male and female avatars. We found that with swapped avatar genders, the preferred distance increased between friends but not between strangers. We discuss how our results can inform researchers and designers in the domain of multi-user VR.
@InProceedings{rivu2021interact2,
author = {Radiah Rivu AND Yumeng Zhou AND Robin Welsch AND Ville Mäkelä AND Florian Alt},
booktitle = {{Proceedings of the 18th IFIP TC 13 International Conference on Human-Computer Interaction}},
title = {{When Friends become Strangers: Understanding the Influence of Avatar Gender On Interpersonal Distance in Virtual Reality}},
year = {2021},
address = {Berlin-Heidelberg, Germany},
month = {4},
note = {rivu2021interact2},
publisher = {Springer},
series = {INTERACT '21},
abstract = {In this paper, we investigate how mismatches between biological gender and avatar gender affect interpersonal distance (IPD) in virtual reality (VR). An increasing number of VR experiences and online platforms like Rec Room and VRChat allow users to assume other genders through customized avatars. While the effects of acquaintanceship and gender have been studied with regard to proxemic behavior, the effect of changed genders remains largely unexplored. We conducted a user study (N = 40, friends = 20, strangers = 20) where users played a two-player collaborative game in Rec Room using both male and female avatars. We found that with swapped avatar genders, the preferred distance increased between friends but not between strangers. We discuss how our results can inform researchers and designers in the domain of multi-user VR.},
day = {1},
doi = {10.1007/978-3-030-85607-6_16},
isbn = {978-3-030-85607-6},
language = {English},
location = {Bari, Italy},
owner = {florian},
timestamp = {2021.09.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/rivu2021interact2.pdf},
}
S. Prange, S. Mayer, M. Bittl, M. Hassib, and F. Alt. Investigating User Perceptions Towards Wearable Mobile Electromyography. In Proceedings of the 18th IFIP TC 13 International Conference on Human-Computer Interaction (INTERACT ’21), Springer, Berlin-Heidelberg, Germany, 2021. doi:10.1007/978-3-030-85610-6_20
[BibTeX] [Abstract] [PDF]
Wearables capture physiological user data, enabling novel user interfaces that can identify users, adapt their user interface, and contribute to the quantified self. At the same time, little is known about users’ perception of this new technology. In this paper, we present findings from a user study (N=36) in which participants used an electromyography (EMG) wearable and a visualization of the data that can be collected using EMG wearables. We found that participants are highly unaware of what EMG data can reveal about them. Allowing them to explore their physiological data makes them more reluctant to share this data. We conclude with deriving guidelines, to help designers of physiological data-based user interfaces to (a) protect users’ privacy, (b) better inform them, and (c) ultimately support the uptake of this technology.
@InProceedings{prange2021interact,
author = {Sarah Prange AND Sven Mayer AND Maria-Lena Bittl AND Mariam Hassib AND Florian Alt},
booktitle = {{Proceedings of the 18th IFIP TC 13 International Conference on Human-Computer Interaction}},
title = {{Investigating User Perceptions Towards Wearable Mobile Electromyography}},
year = {2021},
address = {Berlin-Heidelberg, Germany},
month = {4},
note = {prange2021interact},
publisher = {Springer},
series = {INTERACT '21},
abstract = {Wearables capture physiological user data, enabling novel user interfaces that can identify users, adapt their user interface, and contribute to the quantified self. At the same time, little is known about users' perception of this new technology. In this paper, we present findings from a user study (N=36) in which participants used an electromyography (EMG) wearable and a visualization of the data that can be collected using EMG wearables. We found that participants are highly unaware of what EMG data can reveal about them. Allowing them to explore their physiological data makes them more reluctant to share this data. We conclude with deriving guidelines, to help designers of physiological data-based user interfaces to (a) protect users' privacy, (b) better inform them, and (c) ultimately support the uptake of this technology.},
day = {1},
doi = {10.1007/978-3-030-85610-6_20},
language = {English},
location = {Bari, Italy},
owner = {florian},
timestamp = {2021.09.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/prange2021interact.pdf},
}
M. Froehlich, C. Kobiella, A. Schmidt, and F. Alt. Is It Better With Onboarding? Improving First-Time Cryptocurrency App Experiences. In Designing Interactive Systems Conference 2021 (DIS ’21), Association for Computing Machinery, New York, NY, USA, 2021, p. 78–89. doi:10.1145/3461778.3462047
[BibTeX] [Abstract] [PDF] [Video]
Engaging first-time users of mobile apps is challenging. Onboarding task flows are designed to minimize the drop out of users. To this point, there is little scientific insight into how to design these task flows. We explore this question with a specific focus on financial applications, which pose a particularly high hurdle and require significant trust. We address this question by combining two approaches. We first conducted semi-structured interviews (n=16) exploring users’ meaning-making when engaging with new mobile applications in general. We then prototyped and evaluated onboarding task flows (n=16) for two mobile cryptocurrency apps using the minimalist instruction framework. Our results suggest that well-designed onboarding processes can improve the perceived usability of first-time users for feature-rich mobile apps. We discuss how the expectations users voiced during the interview study can be met by applying instructional design principles and reason that the minimalist instruction framework for mobile onboarding insights presents itself as a useful design method for practitioners to develop onboarding processes and also to identify when not to.
@InProceedings{froehlich2021dis2,
author = {Froehlich, Michael and Kobiella, Charlotte and Schmidt, Albrecht and Alt, Florian},
booktitle = {{Designing Interactive Systems Conference 2021}},
title = {{Is It Better With Onboarding? Improving First-Time Cryptocurrency App Experiences}},
year = {2021},
address = {New York, NY, USA},
note = {froehlich2021dis2},
pages = {78–89},
publisher = {Association for Computing Machinery},
series = {DIS '21},
abstract = {Engaging first-time users of mobile apps is challenging. Onboarding task flows are designed to minimize the drop out of users. To this point, there is little scientific insight into how to design these task flows. We explore this question with a specific focus on financial applications, which pose a particularly high hurdle and require significant trust. We address this question by combining two approaches. We first conducted semi-structured interviews (n=16) exploring users’ meaning-making when engaging with new mobile applications in general. We then prototyped and evaluated onboarding task flows (n=16) for two mobile cryptocurrency apps using the minimalist instruction framework. Our results suggest that well-designed onboarding processes can improve the perceived usability of first-time users for feature-rich mobile apps. We discuss how the expectations users voiced during the interview study can be met by applying instructional design principles and reason that the minimalist instruction framework for mobile onboarding insights presents itself as a useful design method for practitioners to develop onboarding processes and also to identify when not to.},
doi = {10.1145/3461778.3462047},
isbn = {9781450384766},
numpages = {12},
owner = {florian},
timestamp = {2021.06.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/froehlich2021dis2.pdf},
video = {froehlich2021dis2},
}
M. Froehlich, M. Wagenhaus, A. Schmidt, and F. Alt. Don’t Stop Me Now! Exploring Challenges Of First-Time Cryptocurrency Users. In Proceedings of the 2021 ACM Conference on Designing Interactive Systems (DIS ’21), Association for Computing Machinery, New York, NY, USA, 2021. doi:10.1145/3461778.3462071
[BibTeX] [Abstract] [PDF] [Video]
Cryptocurrencies have increasingly gained interest in practice and research alike. Current research in the HCI community predominantly focuses on understanding the behavior of existing cryptocurrency users. Little attention has been given to early users and the challenges they encounter. However, understanding how interfaces of cryptocurrency systems support, impede, or even prevent adoption through new users is essential to develop better, more inclusive solutions. To close this gap, we conducted a user study (n=34) exploring challenges first-time cryptocurrency users face. Our analysis reveals that even popular wallets are not designed for novice users’ needs, stopping them when they would be ready to engage with the technology. We identify multiple challenges ranging from general user interface issues to finance- and cryptocurrency-specific ones. We argue that these challenges can and should be addressed by the HCI community and present implications for building better cryptocurrency systems for novice users.
@InProceedings{froehlich2021dis1,
author = {Michael Froehlich AND Maurizio Wagenhaus AND Albrecht Schmidt AND Florian Alt},
booktitle = {{Proceedings of the 2021 ACM Conference on Designing Interactive Systems}},
title = {{Don't Stop Me Now! Exploring Challenges Of First-Time Cryptocurrency Users}},
year = {2021},
address = {New York, NY, USA},
note = {froehlich2021dis1},
publisher = {Association for Computing Machinery},
series = {DIS '21},
abstract = {Cryptocurrencies have increasingly gained interest in practice and research alike. Current research in the HCI community predominantly focuses on understanding the behavior of existing cryptocurrency users. Little attention has been given to early users and the challenges they encounter. However, understanding how interfaces of cryptocurrency systems support, impede, or even prevent adoption through new users is essential to develop better, more inclusive solutions. To close this gap, we conducted a user study (n=34) exploring challenges first-time cryptocurrency users face. Our analysis reveals that even popular wallets are not designed for novice users' needs, stopping them when they would be ready to engage with the technology. We identify multiple challenges ranging from general user interface issues to finance- and cryptocurrency-specific ones. We argue that these challenges can and should be addressed by the HCI community and present implications for building better cryptocurrency systems for novice users.},
doi = {10.1145/3461778.3462071},
isbn = {9781450384766},
location = {Virtual},
owner = {florian},
timestamp = {2021.06.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/froehlich2021dis1.pdf},
video = {froehlich2021dis1},
}
S. R. R. Rivu, Y. Abdrabou, Y. Abdelrahman, K. Pfeuffer, D. Kern, C. Neuert, D. Buschek, and F. Alt. Did you Understand this? Leveraging Gaze Behavior to Assess Questionnaire Comprehension. In Proceedings of the 2021 ACM Symposium on Eye Tracking Research & Applications (COGAIN ’21), Association for Computing Machinery, New York, NY, USA, 2021. doi:10.1145/3448018.3458018
[BibTeX] [Abstract] [PDF] [Talk]
Reading is one of the primary channels to gain information. Due to the growing amount of textual information we encounter, techniques are needed to decide on the relevance of a text and how to grasp its content. We propose the usage of gaze behaviour as an assistive tool to assess the users’ reading comprehension. We investigate how problems in understanding text – specifically a word or a sentence – while filling in questionnaires are reflected in gaze behaviour. To identify text comprehension problems while filling in a questionnaire, and their correlation with gaze features, we collected data from 42 participants. In a follow-up study (N=30), we evoked comprehension problems and quantified the gaze features they affect. Our findings imply that comprehension problems could be reflected in a set of gaze features, namely the number of fixations, the duration of fixations, and the number of regressions. Our findings not only demonstrate the potential of eye tracking for assessing reading comprehension but also pave the way for researchers and designers to build novel questionnaire tools that instantly mitigate problems in reading comprehension.
@InProceedings{rivu2021cogain,
author = {Sheikh Radiah Rahim Rivu AND Yasmeen Abdrabou AND Yomna Abdelrahman AND Ken Pfeuffer AND Dagmar Kern AND Cornelia Neuert AND Daniel Buschek AND Florian Alt},
booktitle = {{Proceedings of the 2021 ACM Symposium on Eye Tracking Research \& Applications}},
title = {{Did you Understand this? Leveraging Gaze Behavior to Assess Questionnaire Comprehension}},
year = {2021},
address = {New York, NY, USA},
note = {rivu2021cogain},
publisher = {Association for Computing Machinery},
series = {COGAIN '21},
abstract = {Reading is one of the primary channels to gain information. Due to the growing amount of textual information we encounter, techniques are needed to decide on the relevance of a text and how to grasp its content. We propose the usage of gaze behaviour as an assistive tool to assess the users’ reading comprehension. We investigate how problems in understanding text – specifically a word or a sentence – while filling in questionnaires are reflected in gaze behaviour. To identify text comprehension problems while filling in a questionnaire, and their correlation with gaze features, we collected data from 42 participants. In a follow-up study (N=30), we evoked comprehension problems and quantified the gaze features they affect. Our findings imply that comprehension problems could be reflected in a set of gaze features, namely the number of fixations, the duration of fixations, and the number of regressions. Our findings not only demonstrate the potential of eye tracking for assessing reading comprehension but also pave the way for researchers and designers to build novel questionnaire tools that instantly mitigate problems in reading comprehension.},
doi = {10.1145/3448018.3458018},
location = {Stuttgart, Germany},
numpages = {5},
talk = {rivu2021cogain},
timestamp = {2021.06.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/rivu2021cogain.pdf},
}
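The three gaze features named in the abstract above lend themselves to a compact implementation. The following Python sketch is illustrative only; the fixation tuple format and the 30 px same-line threshold are assumptions, not the authors' code.

# Minimal sketch of the three gaze features the COGAIN '21 paper reports
# as indicative of comprehension problems: number of fixations, fixation
# duration, and number of regressions.

def gaze_features(fixations):
    """fixations: time-ordered list of (x, y, duration_ms) tuples."""
    n_fixations = len(fixations)
    mean_duration = (
        sum(d for _, _, d in fixations) / n_fixations if n_fixations else 0.0
    )
    # Count a regression whenever gaze jumps back against reading direction,
    # i.e. left of the previous fixation on roughly the same line (assumed
    # 30 px line threshold).
    n_regressions = sum(
        1
        for (x0, y0, _), (x1, y1, _) in zip(fixations, fixations[1:])
        if x1 < x0 and abs(y1 - y0) < 30
    )
    return {
        "fixation_count": n_fixations,
        "mean_fixation_duration_ms": mean_duration,
        "regression_count": n_regressions,
    }

print(gaze_features([(100, 50, 220), (180, 52, 250), (120, 51, 300)]))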
Y. Abdrabou, A. Shams, M. O. Mantawy, A. Ahmad Khan, M. Khamis, F. Alt, and Y. Abdelrahman. GazeMeter: Exploring the Usage of Gaze Behaviour to Enhance Password Assessments. In Proceedings of the 2021 ACM Symposium on Eye Tracking Research & Applications (ETRA ’21), Association for Computing Machinery, New York, NY, USA, 2021. doi:10.1145/3448017.3457384
[BibTeX] [Abstract] [PDF] [Video]
We investigate the use of gaze behaviour as a means to assess password strength as perceived by users. We contribute to the effort of making users choose passwords that are robust against guessing-attacks. Our particular idea is to consider also the users’ understanding of password strength in security mechanisms. We demonstrate how eye tracking can enable this: by analysing people’s gaze behaviour during password creation, its strength can be determined. To demonstrate the feasibility of this approach, we present a proof of concept study (N = 15) in which we asked participants to create weak and strong passwords. Our findings reveal that it is possible to estimate password strength from gaze behaviour with an accuracy of 86% using Machine Learning. Thus, we enable research on novel interfaces that consider users’ understanding with the ultimate goal of making users choose stronger passwords.
@InProceedings{abdrabou2021etra,
author = {Abdrabou, Yasmeen and Shams, Ahmed and Mantawy, Mohamed Omar and Ahmad Khan, Anam and Khamis, Mohamed and Alt, Florian and Abdelrahman, Yomna},
booktitle = {{Proceedings of the 2021 ACM Symposium on Eye Tracking Research \& Applications}},
title = {{GazeMeter: Exploring the Usage of Gaze Behaviour to Enhance Password Assessments}},
year = {2021},
address = {New York, NY, USA},
note = {abdrabou2021etra},
publisher = {Association for Computing Machinery},
series = {ETRA '21},
abstract = {We investigate the use of gaze behaviour as a means to assess password strength as perceived by users. We contribute to the effort of making users choose passwords that are robust against guessing-attacks. Our particular idea is to consider also the users’ understanding of password strength in security mechanisms. We demonstrate how eye tracking can enable this: by analysing people’s gaze behaviour during password creation, its strength can be determined. To demonstrate the feasibility of this approach, we present a proof of concept study (N = 15) in which we asked participants to create weak and strong passwords. Our findings reveal that it is possible to estimate password strength from gaze behaviour with an accuracy of 86% using Machine Learning. Thus, we enable research on novel interfaces that consider users’ understanding with the ultimate goal of making users choose stronger passwords.},
articleno = {9},
doi = {10.1145/3448017.3457384},
isbn = {9781450383448},
numpages = {12},
owner = {florian},
timestamp = {2021.06.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/abdrabou2020etra.pdf},
video = {abdrabou2021etra},
}
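As a rough illustration of the classification step GazeMeter describes, the sketch below trains a classifier on per-trial gaze features. The feature set, the random-forest model, and the placeholder data are assumptions; only the task setup (binary weak/strong labels, cross-validated accuracy) follows the abstract, and the paper's reported ~86% accuracy is of course not reproduced by toy data.

# Hedged sketch: classify perceived password strength from gaze features.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(0)
# One row per password-creation trial, e.g. [fixation_count,
# mean_fixation_duration, regression_count, mean_saccade_length].
X = rng.random((30, 4))              # placeholder gaze features
y = np.array([0, 1] * 15)            # 0 = weak, 1 = strong (placeholder labels)

clf = RandomForestClassifier(n_estimators=100, random_state=0)
print("mean CV accuracy:", cross_val_score(clf, X, y, cv=5).mean())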
S. Faltaous, A. Abdulmaksoud, M. Kempe, F. Alt, and S. Schneegass. GeniePutt: Augmenting human motor skills through electrical muscle stimulation. it – Information Technology, 2021. doi:10.1515/itit-2020-0035
[BibTeX] [PDF]
@Article{faltaous2021it,
author = {Sarah Faltaous and Aya Abdulmaksoud and Markus Kempe and Florian Alt and Stefan Schneegass},
journal = {{it -- Information Technology}},
title = {{GeniePutt: Augmenting human motor skills through electrical muscle stimulation}},
year = {2021},
note = {faltaous2021it},
doi = {10.1515/itit-2020-0035},
timestamp = {2021.05.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/faltaous2021it.pdf},
}
A. Schmidt, F. Alt, and V. Mäkelä. Evaluation in Human-Computer Interaction – Beyond Lab Studies. In Extended Abstracts of the 2021 CHI Conference on Human Factors in Computing Systems (CHI’21 EA), Association for Computing Machinery, New York, NY, USA, 2021. doi:10.1145/3411763.3445022
[BibTeX] [Abstract] [PDF]
Many research contributions in human-computer interaction are based on user studies in the lab. However, lab studies are not always possible, and they may come with significant challenges and limitations. In this course, we take a broader look at different approaches to doing research. We present a set of evaluation methods and research contributions that do not rely on user studies in labs. The discussion focuses on research approaches, data collection methods, and tools that can be conducted without direct interaction between the researchers and the participants.
@InProceedings{schmidt2021chiea,
author = {Schmidt, Albrecht and Alt, Florian and M\"{a}kel\"{a}, Ville},
booktitle = {{Extended Abstracts of the 2021 CHI Conference on Human Factors in Computing Systems}},
title = {{Evaluation in Human-Computer Interaction – Beyond Lab Studies}},
year = {2021},
address = {New York, NY, USA},
note = {schmidt2021chiea},
publisher = {Association for Computing Machinery},
series = {CHI'21 EA},
abstract = {Many research contributions in human-computer interaction are based on user studies in the lab. However, lab studies are not always possible, and they may come with significant challenges and limitations. In this course, we take a broader look at different approaches to doing research. We present a set of evaluation methods and research contributions that do not rely on user studies in labs. The discussion focuses on research approaches, data collection methods, and tools that can be conducted without direct interaction between the researchers and the participants.},
articleno = {142},
doi = {10.1145/3411763.3445022},
isbn = {9781450380959},
numpages = {4},
timestamp = {2021.05.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schmidt2021chiea.pdf},
}
R. Rivu, V. Mäkelä, S. Prange, S. D. Rodriguez, R. Piening, Y. Zhou, K. Köhle, K. Pfeuffer, Y. Abdelrahman, M. Hoppe, A. Schmidt, and F. Alt, Remote VR Studies – A Framework for Running Virtual Reality Studies Remotely Via Participant-Owned HMDs, arXiv preprint arXiv:2102.11207, 2021.
[BibTeX] [PDF]
@Misc{rivu2021arxiv,
author = {Radiah Rivu and Ville Mäkelä and Sarah Prange and Sarah Delgado Rodriguez and Robin Piening and Yumeng Zhou and Kay Köhle and Ken Pfeuffer and Yomna Abdelrahman and Matthias Hoppe and Albrecht Schmidt and Florian Alt},
note = {rivu2021arxiv},
title = {{Remote VR Studies -- A Framework for Running Virtual Reality Studies Remotely Via Participant-Owned HMDs}},
year = {2021},
archiveprefix = {arXiv},
eprint = {2102.11207},
primaryclass = {cs.HC},
timestamp = {2021.05.06},
url = {http://www.florian-alt.org/unibw/wp-content/publications/rivu2021arxiv.pdf},
}
F. Alt. Out-of-the-Lab Research in Usable Security and Privacy. In Adjunct Proceedings of the 29th ACM Conference on User Modeling, Adaptation and Personalization (UMAP ’21), Association for Computing Machinery, New York, NY, USA, 2021. doi:10.1145/3450614.3464468
[BibTeX] [Abstract] [PDF]
The COVID pandemic made it challenging for usable security and privacy researchers around the globe to run experiments involving human subjects, specifically in cases where such experiments are conducted in controlled lab settings. Examples include but are not limited to (a) observing and collecting data on user behavior with the goal of (b) informing the design and (c) engineering novel concepts based on adaptation and personalization as well as (d) evaluating such concepts regarding user performance and robustness against different threat models. In this keynote I will set out by providing a brief introduction to and examples of our research on behavioral biometrics. I will then discuss how the current situation influences research requiring close work with human subjects in lab settings and outline approaches to address emerging issues. Finally, I will provide some examples of out-of-the-lab research and reflect on both challenges and opportunities of these approaches.
@InProceedings{alt2021apps,
author = {Florian Alt},
booktitle = {{Adjunct Proceedings of the 29th ACM Conference on User Modeling, Adaptation and Personalization}},
title = {{Out-of-the-Lab Research in Usable Security and Privacy}},
year = {2021},
address = {New York, NY, USA},
note = {alt2021apps},
publisher = {Association for Computing Machinery},
series = {UMAP '21},
abstract = {The COVID pandemic made it challenging for usable security and privacy researchers around the globe to run experiments involving human subjects, specifically in cases where such experiments are conducted in controlled lab settings. Examples include but are not limited to (a) observing and collecting data on user behavior with the goal of (b) informing the design and (c) engineering novel concepts based on adaptation and personalization as well as (d) evaluating such concepts regarding user performance and robustness against different threat models. In this keynote I will set out by providing a brief introduction to and examples of our research on behavioral biometrics. I will then discuss how the current situation influences research requiring close work with human subjects in lab settings and outline approaches to address emerging issues. Finally, I will provide some examples of out-of-the-lab research and reflect on both challenges and opportunities of these approaches.},
doi = {10.1145/3450614.3464468},
timestamp = {2021.05.05},
url = {http://florian-alt.org/unibw/wp-content/publications/alt2021apps.pdf},
}
S. Prange, A. Shams, R. Piening, Y. Abdelrahman, and F. Alt. PriView – Exploring Visualisations Supporting Users’ Privacy Awareness. In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems (CHI ’21), Association for Computing Machinery, New York, NY, USA, 2021. doi:10.1145/3411764.3445067
[BibTeX] [PDF] [Video]
@InProceedings{prange2021chi,
author = {Sarah Prange AND Ahmed Shams AND Robin Piening AND Yomna Abdelrahman AND Florian Alt},
booktitle = {{Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems}},
title = {{PriView -- Exploring Visualisations Supporting Users' Privacy Awareness}},
year = {2021},
address = {New York, NY, USA},
note = {prange2021chi},
publisher = {Association for Computing Machinery},
series = {CHI ’21},
doi = {10.1145/3411764.3445067},
isbn = {9781450367080},
location = {Yokohama, Japan},
timestamp = {2021.05.03},
url = {http://florian-alt.org/unibw/wp-content/publications/prange2021chi.pdf},
video = {prange2021chi},
}
L. Müller, K. Pfeuffer, J. Gugenheimer, S. Prange, B. Pfleging, and F. Alt. SpatialProto: Using Real-World Captures for Rapid Prototyping of Mixed Reality Experiences. In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems (CHI ’21), Association for Computing Machinery, New York, NY, USA, 2021. doi:10.1145/3411764.3445560
[BibTeX] [Abstract] [PDF]
Spatial computing systems that blend virtual and real worlds are increasingly becoming ubiquitous. However, creating experiences for spatial computing is difficult and requires skills in programming and 3D content creation, rendering them inaccessible to a wide user-group. We present SpatialProto, an in-situ spatial prototyping system that lowers the barrier for users to engage in spatial prototyping. With a depth-sensing capable Mixed Reality headset, SpatialProto lets users record animated objects of the real-world environment (e.g. paper, clay, people or any other prop), extract only the relevant parts, and directly place and transform these recordings in their physical environment. We describe the design and implementation of SpatialProto, a user study evaluating the system’s prototype with non-expert users (n=9), and demonstrate applications where multiple captures are fused for compelling Mixed Reality experiences.
@InProceedings{mueller2021chi,
author = {Leon M\"{u}ller AND Ken Pfeuffer AND Jan Gugenheimer AND Sarah Prange AND Bastian Pfleging AND Florian Alt},
booktitle = {{Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems}},
title = {{SpatialProto: Using Real-World Captures for Rapid Prototyping of Mixed Reality Experiences}},
year = {2021},
address = {New York, NY, USA},
note = {mueller2021chi},
publisher = {Association for Computing Machinery},
series = {CHI ’21},
abstract = {Spatial computing systems that blend virtual and real worlds are increasingly becoming ubiquitous. However, creating experiences for spatial computing is difficult and requires skills in programming and 3D content creation, rendering them inaccessible to a wide user-group. We present SpatialProto, an in-situ spatial prototyping system that lowers the barrier for users to engage in spatial prototyping. With a depth-sensing capable Mixed Reality headset, SpatialProto lets users record animated objects of the real-world environment (e.g. paper, clay, people or any other prop), extract only the relevant parts, and directly place and transform these recordings in their physical environment. We describe the design and implementation of SpatialProto, a user study evaluating the system's prototype with non-expert users (n=9), and demonstrate applications where multiple captures are fused for compelling Mixed Reality experiences.},
doi = {10.1145/3411764.3445560},
isbn = {9781450367080},
location = {Yokohama, Japan},
timestamp = {2021.05.03},
url = {http://florian-alt.org/unibw/wp-content/publications/mueller2021chi.pdf},
}
V. Mäkelä, J. Kleine, M. Hood, F. Alt, and A. Schmidt. Hidden Interaction Techniques: Concealed Information Acquisition and Texting on Smartphones and Wearables. In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems (CHI ’21), Association for Computing Machinery, New York, NY, USA, 2021. doi:10.1145/3411764.3445504
[BibTeX] [Abstract] [PDF] [Video]
There are many situations where using personal devices is not socially acceptable, or where nearby people present a privacy risk. For these situations, we developed two eyes-free interaction techniques that are difficult to notice: HiddenHaptics, that allows users to receive information through vibrotactile cues on a smartphone, and HideWrite, that allows users to write text messages by drawing on a dimmed smartwatch screen. We conducted three user studies to investigate whether, and how, these techniques can be used without being exposed. Our primary findings are (1) users can effectively hide their interactions while attending to a social situation, (2) users seek to interact when another person is speaking, and they also tend to hide the interaction using their body or furniture, and (3) users can sufficiently focus on the social situation despite their interaction, whereas non-users feel that observing the user hinders their ability to focus on the social activity.
@InProceedings{maekelae2021chi,
author = {Ville M\"{a}kel\"{a} AND Johannes Kleine AND Maxine Hood AND Florian Alt AND Albrecht Schmidt},
booktitle = {{Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems}},
title = {{Hidden Interaction Techniques: Concealed Information Acquisition and Texting on Smartphones and Wearables}},
year = {2021},
address = {New York, NY, USA},
note = {maekelae2021chi},
publisher = {Association for Computing Machinery},
series = {CHI ’21},
abstract = {There are many situations where using personal devices is not socially acceptable, or where nearby people present a privacy risk. For these situations, we developed two eyes-free interaction techniques that are difficult to notice: HiddenHaptics, that allows users to receive information through vibrotactile cues on a smartphone, and HideWrite, that allows users to write text messages by drawing on a dimmed smartwatch screen. We conducted three user studies to investigate whether, and how, these techniques can be used without being exposed. Our primary findings are (1) users can effectively hide their interactions while attending to a social situation, (2) users seek to interact when another person is speaking, and they also tend to hide the interaction using their body or furniture, and (3) users can sufficiently focus on the social situation despite their interaction, whereas non-users feel that observing the user hinders their ability to focus on the social activity.},
doi = {10.1145/3411764.3445504},
isbn = {9781450367080},
location = {Yokohama, Japan},
timestamp = {2021.05.03},
url = {http://florian-alt.org/unibw/wp-content/publications/maekelae2021chi.pdf},
video = {maekelae2021chi},
}
J. Liebers, U. Gruenefeld, L. Mecke, A. Saad, J. Auda, F. Alt, M. Abdelaziz, and S. Schneegass. Understanding User Identification in Virtual Reality through Behavioral Biometrics and the Effect of Body Normalization. In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems (CHI ’21), Association for Computing Machinery, New York, NY, USA, 2021. doi:10.1145/3411764.3445528
[BibTeX] [Abstract] [PDF] [Video]
Virtual Reality (VR) is becoming increasingly popular both in the entertainment and professional domains. Behavioral biometrics have recently been investigated as a means to continuously and implicitly identify users in VR. VR applications can specifically benefit from this, for example, to adapt the environment and user interface as well as to authenticate users. In this work, we conduct a lab study (N=16) to explore how accurately users can be identified during two task-driven scenarios based on their spatial movement. We show that an identification accuracy of up to 90% is possible across sessions recorded on different days. Moreover, we investigate the role of users’ physiology on behavioral biometrics. In particular, we virtually alter and normalize users’ body proportions to examine the influence on behavior. We find that body normalization in general increases the identification rate, in some cases by up to 38%, hence it improves the performance of identification systems.
@InProceedings{liebers2021chi,
author = {Jonathan Liebers AND Uwe Gruenefeld AND Lukas Mecke AND Alia Saad AND Jonas Auda AND Florian Alt AND Mark Abdelaziz AND Stefan Schneegass},
booktitle = {{Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems}},
title = {{Understanding User Identification in Virtual Reality through Behavioral Biometrics and the Effect of Body Normalization}},
year = {2021},
address = {New York, NY, USA},
note = {liebers2021chi},
publisher = {Association for Computing Machinery},
series = {CHI ’21},
abstract = {Virtual Reality (VR) is becoming increasingly popular both in the entertainment and professional domains. Behavioral biometrics have recently been investigated as a means to continuously and implicitly identify users in VR. VR applications can specifically benefit from this, for example, to adapt the environment and user interface as well as to authenticate users. In this work, we conduct a lab study (N=16) to explore how accurately users can be identified during two task-driven scenarios based on their spatial movement. We show that an identification accuracy of up to 90% is possible across sessions recorded on different days. Moreover, we investigate the role of users' physiology on behavioral biometrics. In particular, we virtually alter and normalize users' body proportions to examine the influence on behavior. We find that body normalization in general increases the identification rate, in some cases by up to 38%, hence it improves the performance of identification systems.},
doi = {10.1145/3411764.3445528},
isbn = {9781450367080},
location = {Yokohama, Japan},
timestamp = {2021.05.03},
url = {http://florian-alt.org/unibw/wp-content/publications/liebers2021chi.pdf},
video = {liebers2021chi},
}
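A minimal sketch of the identification pipeline this paper studies, under assumptions: movement recordings are summarized as feature vectors, a crude height-based rescaling stands in for the paper's avatar-based body normalization, and a nearest-neighbour model matches recordings to users. The feature choices and toy data are illustrative, not the study's pipeline.

# Hedged sketch: identify users from head-tracking recordings.
import numpy as np
from sklearn.neighbors import KNeighborsClassifier

def features(frames, height_m):
    """frames: (n, 3) array of head positions in metres."""
    frames = frames / height_m          # body normalization (assumed form)
    speed = np.linalg.norm(np.diff(frames, axis=0), axis=1)
    return np.concatenate([frames.mean(axis=0), frames.std(axis=0),
                           [speed.mean(), speed.std()]])

# Toy data: two users with different heights and movement amplitudes,
# three recorded sessions each.
rng = np.random.default_rng(1)
users = [(1.62, 0.02), (1.85, 0.06)]    # (height, sway) -- assumed values
X = [features(rng.normal([0, h, 0], sway, (200, 3)), h)
     for h, sway in users for _ in range(3)]
y = [0, 0, 0, 1, 1, 1]
clf = KNeighborsClassifier(n_neighbors=1).fit(X[:4], y[:4])
print(clf.predict(X[4:]))               # match the two held-out recordings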
Y. Abdrabou, Y. Abdelrahman, M. Khamis, and F. Alt. Think Harder! Investigating the Effect of Password Strength on Cognitive Load during Password Creation. In Extended Abstracts of the 2021 CHI Conference on Human Factors in Computing Systems (CHIEA ’21), Association for Computing Machinery, New York, NY, USA, 2021. doi:10.1145/3411763.3451636
[BibTeX] [PDF] [Video]
@InProceedings{abdrabou2021chiea,
author = {Yasmeen Abdrabou AND Yomna Abdelrahman AND Mohamed Khamis AND Florian Alt},
booktitle = {{Extended Abstracts of the 2021 CHI Conference on Human Factors in Computing Systems}},
title = {{Think Harder! Investigating the Effect of Password Strength on Cognitive Load during Password Creation}},
year = {2021},
address = {New York, NY, USA},
note = {abdrabou2021chiea},
publisher = {Association for Computing Machinery},
series = {CHIEA ’21},
doi = {10.1145/3411763.3451636},
location = {Yokohama, Japan},
numpages = {8},
timestamp = {2021.05.02},
url = {http://florian-alt.org/unibw/wp-content/publications/abdrabou2021chiea.pdf},
video = {abdrabou2021chiea},
}
S. D. Rodriguez, S. Prange, L. Mecke, and F. Alt. ActPad – A Smart Desk Platform to Enable User Interaction with IoT Devices. In Extended Abstracts of the 2021 CHI Conference on Human Factors in Computing Systems (CHIEA ’21), Association for Computing Machinery, New York, NY, USA, 2021. doi:10.1145/3411763.3451825
[BibTeX] [PDF] [Video]
@InProceedings{delgado2021chiea,
author = {Sarah Delgado Rodriguez AND Sarah Prange AND Lukas Mecke AND Florian Alt},
booktitle = {{Extended Abstracts of the 2021 CHI Conference on Human Factors in Computing Systems}},
title = {{ActPad -- A Smart Desk Platform to Enable User Interaction with IoT Devices}},
year = {2021},
address = {New York, NY, USA},
note = {delgado2021chiea},
publisher = {Association for Computing Machinery},
series = {CHIEA ’21},
doi = {10.1145/3411763.3451825},
location = {Yokohama, Japan},
numpages = {8},
timestamp = {2021.05.02},
url = {http://florian-alt.org/unibw/wp-content/publications/delgado2021chiea.pdf},
video = {delgado2021chiea},
}
S. Prange, K. Marky, and F. Alt. Usable Authentication in Multi-Device Ecosystems. In Proceedings of the CHI 2021 Workshop on User Experience for Multi-Device Ecosystems: Challenges and Opportunities (UX4MDE ’21), 2021.
[BibTeX] [PDF]
@InProceedings{prange2021ux4mde,
author = {Sarah Prange AND Karola Marky AND Florian Alt},
booktitle = {{Proceedings of the CHI 2021 Workshop on User Experience for Multi-Device Ecosystems: Challenges and Opportunities}},
title = {{Usable Authentication in Multi-Device Ecosystems}},
year = {2021},
note = {prange2021ux4mde},
series = {UX4MDE '21},
timestamp = {2021.04.30},
url = {http://florian-alt.org/unibw/wp-content/publications/prange2021ux4mde.pdf},
}
M. Khamis and F. Alt, “Privacy and Security in Augmentation Technologies,” in Technology-Augmented Perception and Cognition, T. Dingler and E. Niforatos, Eds., Cham: Springer International Publishing, 2021, p. 257–279. doi:10.1007/978-3-030-30457-7_8
[BibTeX] [Abstract] [PDF]
In this chapter, we present a privacy and security framework for designers of technologies that augment humans’ cognitive and perceptive capabilities. The framework consists of several groups of questions, meant to guide designers during the different stages of the design process. The objective of our work is to support the need for considering implications of novel technologies with regard to privacy and security early in the design process rather than post-hoc. The framework is based on a thorough review of the technologies presented earlier on in this book as well as of prior research in the field of technology augmentation. From this review, we derived several themes that are not only valuable pointers for future work but also serve as a basis for the subsequent framework. We point out the need to focus on the following aspects: data handling, awareness, user consent, and the design of the user interface.
@InBook{khamis2021springer,
author = {Khamis, Mohamed and Alt, Florian},
chapter = {Privacy and Security in Augmentation Technologies},
editor = {Dingler, Tilman and Niforatos, Evangelos},
pages = {257--279},
publisher = {Springer International Publishing},
title = {{Technology-Augmented Perception and Cognition}},
year = {2021},
address = {Cham},
isbn = {978-3-030-30457-7},
note = {khamis2021springer},
abstract = {In this chapter, we present a privacy and security framework for designers of technologies that augment humans' cognitive and perceptive capabilities. The framework consists of several groups of questions, meant to guide designers during the different stages of the design process. The objective of our work is to support the need for considering implications of novel technologies with regard to privacy and security early in the design process rather than post-hoc. The framework is based on a thorough review of the technologies presented earlier on in this book as well as of prior research in the field of technology augmentation. From this review, we derived several themes that are not only valuable pointers for future work but also serve as a basis for the subsequent framework. We point out the need to focus on the following aspects: data handling, awareness, user consent, and the design of the user interface.},
booktitle = {Technology-Augmented Perception and Cognition},
doi = {10.1007/978-3-030-30457-7_8},
timestamp = {2021.01.15},
url = {http://florian-alt.org/unibw/wp-content/publications/khamis2021springer.pdf},
}
D. Buschek and F. Alt, “Building Adaptive Touch Interfaces — Case Study 6,” in Intelligent Computing for Interactive System Design: Statistics, Digital Signal Processing, and Machine Learning in Practice, 1st ed., New York, NY, USA: Association for Computing Machinery, 2021, p. 379–406. doi:10.1145/3447404.3447426
[BibTeX] [PDF]
@InBook{buschek2021intelligentcomputing,
author = {Buschek, Daniel and Alt, Florian},
pages = {379–406},
publisher = {Association for Computing Machinery},
title = {{Building Adaptive Touch Interfaces — Case Study 6}},
year = {2021},
address = {New York, NY, USA},
edition = {1},
isbn = {9781450390293},
note = {buschek2021intelligentcomputing},
booktitle = {Intelligent Computing for Interactive System Design: Statistics, Digital Signal Processing, and Machine Learning in Practice},
doi = {10.1145/3447404.3447426},
numpages = {28},
owner = {florian},
timestamp = {2021.01.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2021intelligentcomputing.pdf},
}
K. Pfeuffer, Y. Abdrabou, A. Esteves, R. Rivu, Y. Abdelrahman, S. Meitner, A. Saadi, and F. Alt. ARtention: A Design Space for Gaze-adaptive User Interfaces in Augmented Reality. Computers & Graphics, 2021. doi:10.1016/j.cag.2021.01.001
[BibTeX] [Abstract] [PDF]
Augmented Reality (AR) headsets extended with eye-tracking, a promising input technology for its natural and implicit nature, open a wide range of new interaction capabilities for everyday use. In this paper we present ARtention, a design space for gaze interaction specifically tailored for in-situ AR information interfaces. It highlights three important dimensions to consider in the UI design of such gaze-enabled applications: transitions from reality to the virtual interface, from single- to multi-layer content, and from information consumption to selection tasks. Such transitional aspects bring previously isolated gaze interaction concepts together to form a unified AR space, enabling more advanced application control seamlessly mediated by gaze. We describe these factors in detail. To illustrate how the design space can be used, we present three prototype applications and report informal user feedback obtained from different scenarios: a conversational UI, viewing a 3D visualization, and browsing items for shopping. We conclude with design considerations derived from our development and evaluation of the prototypes. We expect these to be valuable for researchers and designers investigating the use of gaze input in AR systems and applications.
@Article{pfeuffer2021cg,
author = {Ken Pfeuffer and Yasmeen Abdrabou and Augusto Esteves and Radiah Rivu and Yomna Abdelrahman and Stefanie Meitner and Amr Saadi and Florian Alt},
journal = {{Computers \& Graphics}},
title = {{ARtention: A Design Space for Gaze-adaptive User Interfaces in Augmented Reality}},
year = {2021},
issn = {0097-8493},
note = {pfeuffer2021cg},
abstract = {Augmented Reality (AR) headsets extended with eye-tracking, a promising input technology for its natural and implicit nature, open a wide range of new interaction capabilities for everyday use. In this paper we present ARtention, a design space for gaze interaction specifically tailored for in-situ AR information interfaces. It highlights three important dimensions to consider in the UI design of such gaze-enabled applications: transitions from reality to the virtual interface, from single- to multi-layer content, and from information consumption to selection tasks. Such transitional aspects bring previously isolated gaze interaction concepts together to form a unified AR space, enabling more advanced application control seamlessly mediated by gaze. We describe these factors in detail. To illustrate how the design space can be used, we present three prototype applications and report informal user feedback obtained from different scenarios: a conversational UI, viewing a 3D visualization, and browsing items for shopping. We conclude with design considerations derived from our development and evaluation of the prototypes. We expect these to be valuable for researchers and designers investigating the use of gaze input in AR systems and applications.},
doi = {10.1016/j.cag.2021.01.001},
keywords = {Augmented reality, AR, Mixed reality, MR, Gaze Interaction, Attention, Visualization, Design space},
timestamp = {2021.01.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/pfeuffer2021cg.pdf},
}

2020

A. Schmidt and F. Alt. Evaluation in Human-Computer Interaction – Beyond Lab Studies. Working Document, 2020.
[BibTeX] [Abstract] [PDF]
In this paper we present a set of approaches to evaluation in human-computer interaction that offer an alternative to lab studies. The discussion focuses on research approaches, data collection methods, and tools that can be conducted without direct interaction between the researchers and the participants.
@Article{schmidt2020beyondlab,
author = {Albrecht Schmidt AND Florian Alt},
journal = {{Working Document}},
title = {{Evaluation in Human-Computer Interaction -- Beyond Lab Studies}},
year = {2020},
note = {schmidt2020beyondlab},
abstract = {In this paper we present a set of approaches to evaluation in human-computer interaction that offer an alternative to lab studies. The discussion focuses on research approaches, data collection methods, and tools that can be conducted without direct interaction between the researchers and the participants.},
timestamp = {2021.05.07},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schmidt2020beyondlab.pdf},
}
G. Graf, Y. Abdelrahman, H. Xu, Y. Abdrabou, D. Schitz, H. Hußmann, and F. Alt. The Predictive Corridor: A Virtual Augmented Driving Assistance System for Teleoperated Autonomous Vehicles. In International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments (ICAT-EGVE 2020), The Eurographics Association, 2020. doi:10.2312/egve.20201260
[BibTeX] [PDF]
@InProceedings{graf2020icat,
author = {Graf, Gaetano and Abdelrahman, Yomna and Xu, Hao and Abdrabou, Yasmeen and Schitz, Dmitrij and Hußmann, Heinrich and Alt, Florian},
booktitle = {{International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments}},
title = {{The Predictive Corridor: A Virtual Augmented Driving Assistance System for Teleoperated Autonomous Vehicles}},
year = {2020},
editor = {Argelaguet, Ferran and McMahan, Ryan and Sugimoto, Maki},
note = {graf2020icat},
publisher = {The Eurographics Association},
series = {ICAT-EGVE 2020},
doi = {10.2312/egve.20201260},
isbn = {978-3-03868-111-3},
issn = {1727-530X},
timestamp = {2020.12.30},
url = {http://www.florian-alt.org/unibw/wp-content/publications/graf2020icat.pdf},
}
K. Marky, S. Prange, F. Krell, M. Mühlhäuser, and F. Alt. ‘You just can’t know about everything’: Privacy Perceptions of Smart Home Visitors. In Proceedings of the 19th International Conference on Mobile and Ubiquitous Multimedia (MUM’20), Association for Computing Machinery, New York, NY, USA, 2020. doi:10.1145/3365610.3365626
[BibTeX] [Abstract] [PDF]
IoT devices can harvest personal information of any person in their surroundings, and this includes data from visitors. Visitors often cannot protect their privacy in a foreign smart environment. This might be rooted in a poor awareness of privacy violations by IoT devices, a lack of knowledge, or a lack of coping strategies. Thus, visitors are typically unaware of being tracked by IoT devices or lack means to influence which data is collected about them. We interviewed 21 young adults to investigate which knowledge visitors of smart environments need and wish for in order to protect their privacy. We found that visitors consider their relation to the IoT device owner and their familiarity with the environment and IoT devices when making decisions about data sharing that affect their privacy. Overall, the visitors of smart environments demonstrated privacy preferences similar to those of the owners of IoT devices but lacked the means to judge the consequences of data collection and to express their privacy preferences. Based on our results, we discuss prerequisites for enabling visitor privacy in smart environments, demonstrate gaps in existing solutions, and provide several methods to improve the awareness of smart environment visitors.
@InProceedings{marky2020mum,
author = {Marky, Karola AND Prange, Sarah AND Krell, Florian AND Mühlhäuser, Max AND Alt, Florian},
booktitle = {{Proceedings of the 19th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{'You just can’t know about everything': Privacy Perceptions of Smart Home Visitors}},
year = {2020},
address = {New York, NY, USA},
note = {marky2020mum},
publisher = {Association for Computing Machinery},
series = {MUM'20},
abstract = {IoT devices can harvest personal information of any person in their surroundings, and this includes data from \textit{visitors}. Visitors often cannot protect their privacy in a foreign smart environment. This might be rooted in a poor awareness of privacy violations by IoT devices, a lack of knowledge, or a lack of coping strategies. Thus, visitors are typically unaware of being tracked by IoT devices or lack means to influence which data is collected about them. We interviewed 21 young adults to investigate which knowledge visitors of smart environments need and wish for in order to protect their privacy. We found that visitors consider their relation to the IoT device owner and their familiarity with the environment and IoT devices when making decisions about data sharing that affect their privacy. Overall, the visitors of smart environments demonstrated privacy preferences similar to those of the owners of IoT devices but lacked the means to judge the consequences of data collection and to express their privacy preferences. Based on our results, we discuss prerequisites for enabling visitor privacy in smart environments, demonstrate gaps in existing solutions, and provide several methods to improve the awareness of smart environment visitors.},
doi = {10.1145/3365610.3365626},
location = {Essen, Germany},
timestamp = {2020.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/marky2020mum.pdf},
}
Y. Fanger, K. Pfeuffer, U. Helmbrecht, and F. Alt. PIANX – A Platform for Piano Players to Alleviate Music Performance Anxiety Using Mixed Reality. In 19th International Conference on Mobile and Ubiquitous Multimedia (MUM 2020), Association for Computing Machinery, New York, NY, USA, 2020, p. 267–276. doi:10.1145/3428361.3428394
[BibTeX] [Abstract] [PDF]
We present PIANX, a platform to assist piano players in alleviating Music Performance Anxiety (MPA). Our work is motivated by the ability of Virtual Reality (VR) to create environments closely resembling the real world. For musicians, settings such as auditions or concerts are of particular interest, since they allow practicing in situations which evoke stress as a result of stage fright. Current approaches are limited: while they provide a virtual scene, realistic haptic feedback (i.e. playing on a real piano) and an authentic representation of their hands is missing. We close this gap with the design of a Mixed Reality platform, consisting of a MIDI (Musical Instrument Digital Interface) stage piano and an HTC Vive Pro VR headset. The platform offers (a) two approaches to finger tracking and visualization – a virtual representation based on LeapMotion hand tracking (baseline) and a real representation using see-through VR; in addition, it provides (b) three different settings in which users can practice (home, audition, concert hall) and (c) a mechanism for real time feedback. We created a series of videos demonstrating the system and collected feedback from 23 participants in an online study, assessing their views towards our platform. Results reveal key insights for the design of virtual MPA training platforms from a scientific and consumer perspective.
@InProceedings{fanger2020mum,
author = {Fanger, Yara and Pfeuffer, Ken and Helmbrecht, Udo and Alt, Florian},
booktitle = {{19th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{PIANX – A Platform for Piano Players to Alleviate Music Performance Anxiety Using Mixed Reality}},
year = {2020},
address = {New York, NY, USA},
note = {fanger2020mum},
pages = {267–276},
publisher = {Association for Computing Machinery},
series = {MUM 2020},
abstract = {We present PIANX, a platform to assist piano players in alleviating Music Performance Anxiety (MPA). Our work is motivated by the ability of Virtual Reality (VR) to create environments closely resembling the real world. For musicians, settings such as auditions or concerts are of particular interest, since they allow practicing in situations which evoke stress as a result of stage fright. Current approaches are limited: while they provide a virtual scene, realistic haptic feedback (i.e. playing on a real piano) and an authentic representation of their hands is missing. We close this gap with the design of a Mixed Reality platform, consisting of a MIDI (Musical Instrument Digital Interface) stage piano and an HTC Vive Pro VR headset. The platform offers (a) two approaches to finger tracking and visualization – a virtual representation based on LeapMotion hand tracking (baseline) and a real representation using see-through VR; in addition, it provides (b) three different settings in which users can practice (home, audition, concert hall) and (c) a mechanism for real time feedback. We created a series of videos demonstrating the system and collected feedback from 23 participants in an online study, assessing their views towards our platform. Results reveal key insights for the design of virtual MPA training platforms from a scientific and consumer perspective.},
doi = {10.1145/3428361.3428394},
isbn = {9781450388702},
keywords = {virtual reality, performance anxiety, music},
location = {Essen, Germany},
numpages = {10},
timestamp = {2020.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/fanger2020mum.pdf},
}
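The MIDI side of a platform like PIANX is straightforward to prototype. The sketch below is a hedged illustration only; the paper does not disclose its implementation, so the mido library, the port handling, and the expected-note check are all assumptions standing in for the platform's real-time feedback mechanism.

# Hedged sketch: flag wrong notes from a MIDI piano as they are played.
import mido

EXPECTED = [60, 62, 64, 65]  # hypothetical target melody: C4 D4 E4 F4

def practice(port_name=None):
    idx = 0
    with mido.open_input(port_name) as port:   # None opens the default port
        for msg in port:                        # blocks until messages arrive
            if msg.type == "note_on" and msg.velocity > 0:
                if msg.note == EXPECTED[idx]:
                    print("correct:", msg.note)
                else:
                    print(f"expected {EXPECTED[idx]}, got {msg.note}")
                idx += 1
                if idx == len(EXPECTED):
                    break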
M. Khamis and F. Alt, “Privacy and Security in Augmentation,” in Augmented Perception and Cognition, T. Dingler, Ed., Springer, 2020. doi:10.1007/978-3-030-30457-7_8
[BibTeX] [Abstract] [PDF]
In this chapter, we present a privacy and security framework for designers of technologies that augment humans’ cognitive and perceptive capabilities. The framework consists of several groups of questions, meant to guide designers during the different stages of the design process. The objective of our work is to support the need for considering implications of novel technologies with regard to privacy and security early in the design process rather than post-hoc. The framework is based on a thorough review of the technologies presented earlier on in this book as well as of prior research in the field of technology augmentation. From this review, we derived several themes that are not only valuable pointers for future work but also serve as a basis for the subsequent framework. We point out the need to focus on the following aspects: data handling, awareness, user consent, and the design of the user interface.
@InBook{khamis2020augmentation,
author = {Khamis, Mohamed AND Alt, Florian},
chapter = {{Privacy and Security in Augmentation}},
editor = {Dingler, Tilman},
publisher = {Springer},
title = {{Augmented Perception and Cognition}},
year = {2020},
note = {khamis2020augmentation},
abstract = {In this chapter, we present a privacy and security framework for designers of technologies that augment humans’ cognitive and perceptive capabilities. The framework consists of several groups of questions, meant to guide designers during the different stages of the design process. The objective of our work is to support the need for considering implications of novel technologies with regard to privacy and security early in the design process rather than post-hoc. The framework is based on a thorough review of the technologies presented earlier on in this book as well as of prior research in the field of technology augmentation. From this review, we derived several themes that are not only valuable pointers for future work but also serve as a basis for the subsequent framework. We point out the need to focus on the following aspects: data handling, awareness, user consent, and the design of the user interface.},
doi = {10.1007/978-3-030-30457-7_8},
owner = {florian},
timestamp = {2020.11.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2020augmentation.pdf},
}
MuC ’20: Proceedings of Mensch und Computer 2020. New York, NY, USA: Association for Computing Machinery, 2020.
[BibTeX] [Abstract] [PDF]
MuC serves as a unique forum for presenting and exchanging ideas around innovative work through its different formats. Hence, MuC is a great venue to not only present and discuss the latest research. MuC also offers great opportunities to start collaborations and to extend people’s network beyond their community.
@Proceedings{alt2020muc,
title = {MuC '20: Proceedings of Mensch Und Computer 2020},
year = {2020},
address = {New York, NY, USA},
isbn = {9781450375405},
publisher = {Association for Computing Machinery},
abstract = {MuC serves as a unique forum for presenting and exchanging ideas around innovative work through its different formats. Hence, MuC is a great venue to not only present and discuss the latest research. MuC also offers great opportunities to start collaborations and to extend people's network beyond their community.},
editor = {Florian Alt AND Stefan Schneegass AND Eva Hornecker},
location = {Magdeburg, Germany},
timestamp = {2020.10.19},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2020muc.pdf},
}
D. Englmeier, J. O’Hagan, M. Zhang, F. Alt, A. Butz, T. Höllerer, and J. Williamson. TangibleSphere – Interaction Techniques for Physical and Virtual Spherical Displays. In Proceedings of the 11th Nordic Conference on Human-Computer Interaction: Shaping Experiences, Shaping Society (NordiCHI ’20), Association for Computing Machinery, New York, NY, USA, 2020. doi:10.1145/3419249.3420101
[BibTeX] [Abstract] [PDF] [Video]
Tangible interaction is generally assumed to provide benefits compared to other interaction styles due to its physicality. We demonstrate how this physicality can be brought to VR by means of TangibleSphere – a tracked, low-cost physical object that can (a) be rotated freely and (b) is overlaid with a virtual display. We present two studies, investigating performance in terms of efficiency and usability: the first study (N=16) compares TangibleSphere to a physical spherical display regarding accuracy and task completion time. We found comparable results for both types of displays. The second study (N=32) investigates the influence of physical rotation in more depth. We compare a pure VR condition to TangibleSphere in two conditions: one that allows actual physical rotation of the object and one that does not. Our findings show that physical rotation significantly improves accuracy and task completion time. These insights are valuable for researchers designing interaction techniques and interactive visualizations for spherical displays and for VR researchers aiming to incorporate physical touch into the experiences they design.
@InProceedings{englmeier2020nordichi,
author = {Englmeier, David and O'Hagan, Joseph and Zhang, Mengyi and Alt, Florian and Butz, Andreas and H\"{o}llerer, Tobias and Williamson, Julie},
booktitle = {{Proceedings of the 11th Nordic Conference on Human-Computer Interaction: Shaping Experiences, Shaping Society}},
title = {{TangibleSphere – Interaction Techniques for Physical and Virtual Spherical Displays}},
year = {2020},
address = {New York, NY, USA},
note = {englmeier2020nordichi},
publisher = {Association for Computing Machinery},
series = {NordiCHI '20},
abstract = {Tangible interaction is generally assumed to provide benefits compared to other interaction styles due to its physicality. We demonstrate how this physicality can be brought to VR by means of TangibleSphere – a tracked, low-cost physical object that (a) can be rotated freely and (b) is overlaid with a virtual display. We present two studies, investigating performance in terms of efficiency and usability: the first study (N=16) compares TangibleSphere to a physical spherical display regarding accuracy and task completion time. We found comparable results for both types of displays. The second study (N=32) investigates the influence of physical rotation in more depth. We compare a pure VR condition to TangibleSphere in two conditions: one that allows actual physical rotation of the object and one that does not. Our findings show that physical rotation significantly improves accuracy and task completion time. These insights are valuable for researchers designing interaction techniques and interactive visualizations for spherical displays and for VR researchers aiming to incorporate physical touch into the experiences they design.},
articleno = {75},
doi = {10.1145/3419249.3420101},
isbn = {9781450375795},
keywords = {tangible interaction, physicality, virtual reality, spherical displays, display simulation},
location = {Tallinn, Estonia},
numpages = {11},
timestamp = {2020.10.19},
url = {https://doi.org/10.1145/3419249.3420101},
video = {englmeier2020nordichi},
}
M. Braun, J. Li, F. Weber, B. Pfleging, A. Butz, and F. Alt. What If Your Car Would Care? Exploring Use Cases For Affective Automotive User Interfaces. In 22nd International Conference on Human-Computer Interaction with Mobile Devices and Services (MobileHCI ’20), Association for Computing Machinery, New York, NY, USA, 2020. doi:10.1145/3379503.3403530
[BibTeX] [Abstract] [PDF]
In this paper we present use cases for affective user interfaces (UIs) in cars and how they are perceived by potential users in China and Germany. Emotion-aware interaction is enabled by the improvement of ubiquitous sensing methods and provides potential benefits for both traffic safety and personal well-being. To promote the adoption of affective interaction at an international scale, we developed 20 mobile in-car use cases through an inter-cultural design approach and evaluated them with 65 drivers in Germany and China. Our data shows perceived benefits in specific areas of pragmatic quality as well as cultural differences, especially for socially interactive use cases. We also discuss general implications for future affective automotive UI. Our results provide a perspective on cultural peculiarities and a concrete starting point for practitioners and researchers working on emotion-aware interfaces.
@InProceedings{braun2020mobilehci,
author = {Michael Braun AND Jingyi Li AND Florian Weber AND Bastian Pfleging AND Andreas Butz AND Florian Alt},
booktitle = {{22nd International Conference on Human-Computer Interaction with Mobile Devices and Services}},
title = {{What If Your Car Would Care? Exploring Use Cases For Affective Automotive User Interfaces}},
year = {2020},
address = {New York, NY, USA},
note = {braun2020mobilehci},
publisher = {Association for Computing Machinery},
series = {MobileHCI '20},
abstract = {In this paper we present use cases for affective user interfaces (UIs) in cars and how they are perceived by potential users in China and Germany. Emotion-aware interaction is enabled by the improvement of ubiquitous sensing methods and provides potential benefits for both traffic safety and personal well-being. To promote the adoption of affective interaction at an international scale, we developed 20 mobile in-car use cases through an inter-cultural design approach and evaluated them with 65 drivers in Germany and China. Our data shows perceived benefits in specific areas of pragmatic quality as well as cultural differences, especially for socially interactive use cases. We also discuss general implications for future affective automotive UI. Our results provide a perspective on cultural peculiarities and a concrete starting point for practitioners and researchers working on emotion-aware interfaces.},
articleno = {37},
doi = {10.1145/3379503.3403530},
isbn = {9781450375160},
keywords = {Human-Computer Interaction, Affective Computing, Interaction Design, Automotive User Interfaces, Emotion Detection},
location = {Oldenburg, Germany},
numpages = {12},
timestamp = {2020.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2020mobilehci.pdf},
}
M. Fröhlich, F. Gutjahr, and F. Alt. Don’t Lose Your Coin! Investigating Security Practices of Cryptocurrency Users. In Proceedings of the 2020 ACM Designing Interactive Systems Conference (DIS ’20), Association for Computing Machinery, New York, NY, USA, 2020, p. 1751–1763. doi:10.1145/3357236.3395535
[BibTeX] [Abstract] [PDF]
In recent years, cryptocurrencies have increasingly gained interest. The underlying technology, Blockchain, shifts the responsibility for securing assets to the end-user and requires them to manage their (private) keys. Little attention has been given to how cryptocurrency users handle the challenges of key management in practice and how they select the tools to do so. To close this gap, we conducted semi-structured interviews (N=10). Our thematic analysis revealed prominent themes surrounding motivation, risk assessment, and coin management tool usage in practice. We found that the choice of tools is driven by how users assess and balance the key risks that can lead to loss: the risk of (1) human error, (2) betrayal, and (3) malicious attacks. We derive a model, explaining how risk assessment and intended usage drive the decision which tools to use. Our work is complemented by discussing design implications for building systems for the crypto economy.
@InProceedings{froehlich2020dis,
author = {Fr\"{o}hlich, Michael and Gutjahr, Felix and Alt, Florian},
booktitle = {{Proceedings of the 2020 ACM Designing Interactive Systems Conference}},
title = {{Don't Lose Your Coin! Investigating Security Practices of Cryptocurrency Users}},
year = {2020},
address = {New York, NY, USA},
note = {froehlich2020dis},
pages = {1751–1763},
publisher = {Association for Computing Machinery},
series = {DIS '20},
abstract = {In recent years, cryptocurrencies have increasingly gained interest. The underlying technology, Blockchain, shifts the responsibility for securing assets to the end-user and requires them to manage their (private) keys. Little attention has been given to how cryptocurrency users handle the challenges of key management in practice and how they select the tools to do so. To close this gap, we conducted semi-structured interviews (N=10). Our thematic analysis revealed prominent themes surrounding motivation, risk assessment, and coin management tool usage in practice. We found that the choice of tools is driven by how users assess and balance the key risks that can lead to loss: the risk of (1) human error, (2) betrayal, and (3) malicious attacks. We derive a model, explaining how risk assessment and intended usage drive the decision which tools to use. Our work is complemented by discussing design implications for building systems for the crypto economy.},
doi = {10.1145/3357236.3395535},
isbn = {9781450369749},
numpages = {13},
timestamp = {2020.06.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/froehlich2020dis.pdf},
}
V. Gentile, M. Khamis, F. Milazzo, S. Sorce, A. Malizia, and F. Alt. Predicting mid-air gestural interaction with public displays based on audience behaviour. International Journal of Human-Computer Studies, vol. 144, p. 102497, 2020. doi:10.1016/j.ijhcs.2020.102497
[BibTeX] [Abstract] [PDF]
Knowledge about the expected interaction duration and expected distance from which users will interact with public displays can be useful in many ways. For example, knowing upfront that a certain setup will lead to shorter interactions can nudge space owners to alter the setup. If a system can predict that incoming users will interact at a long distance for a short amount of time, it can accordingly show shorter versions of content (e.g., videos/advertisements) and employ at-a-distance interaction modalities (e.g., mid-air gestures). In this work, we propose a method to build models for predicting users’ interaction duration and distance in public display environments, focusing on mid-air gestural interactive displays. First, we report our findings from a field study showing that multiple variables, such as audience size and behaviour, significantly influence interaction duration and distance. We then train predictor models using contextual data, based on the same variables. By applying our method to a mid-air gestural interactive public display deployment, we build a model that predicts interaction duration with an average error of about 8 s, and interaction distance with an average error of about 35 cm. We discuss how researchers and practitioners can use our work to build their own predictor models, and how they can use them to optimise their deployment.
@Article{gentile2020ijhcs,
author = {Vito Gentile and Mohamed Khamis and Fabrizio Milazzo and Salvatore Sorce and Alessio Malizia and Florian Alt},
journal = {{International Journal of Human-Computer Studies}},
title = {{Predicting mid-air gestural interaction with public displays based on audience behaviour}},
year = {2020},
issn = {1071-5819},
note = {gentile2020ijhcs},
pages = {102497},
volume = {144},
abstract = {Knowledge about the expected interaction duration and expected distance from which users will interact with public displays can be useful in many ways. For example, knowing upfront that a certain setup will lead to shorter interactions can nudge space owners to alter the setup. If a system can predict that incoming users will interact at a long distance for a short amount of time, it can accordingly show shorter versions of content (e.g., videos/advertisements) and employ at-a-distance interaction modalities (e.g., mid-air gestures). In this work, we propose a method to build models for predicting users’ interaction duration and distance in public display environments, focusing on mid-air gestural interactive displays. First, we report our findings from a field study showing that multiple variables, such as audience size and behaviour, significantly influence interaction duration and distance. We then train predictor models using contextual data, based on the same variables. By applying our method to a mid-air gestural interactive public display deployment, we build a model that predicts interaction duration with an average error of about 8 s, and interaction distance with an average error of about 35 cm. We discuss how researchers and practitioners can use our work to build their own predictor models, and how they can use them to optimise their deployment.},
doi = {10.1016/j.ijhcs.2020.102497},
keywords = {Pervasive displays, Users behaviour, Audience behaviour},
owner = {florian},
timestamp = {2020.06.20},
url = {http://www.florian-alt.org/unibw/wp-content/publications/gentile2020ijhcs.pdf},
}
Y. Abdrabou, K. Pfeuffer, M. Khamis, and F. Alt. GazeLockPatterns: Comparing Authentication Using Gaze and Touch for Entering Lock Patterns. In Proceedings of the 2020 ACM Symposium on Eye Tracking Research & Applications (ETRA ’20), Association for Computing Machinery, New York, NY, USA, 2020. doi:10.1145/3379156.3391371
[BibTeX] [Abstract] [PDF] [Video]
In this work, we present a comparison between Android’s lock patterns for mobile devices (TouchLockPatterns) and an implementation of lock patterns that uses gaze input (GazeLockPatterns). We report on results of a between subjects study (N=40) to show that for the same layout of authentication interface, people employ comparable strategies for pattern composition. We discuss the pros and cons of adapting lock patterns to gaze-based user interfaces. We conclude with opportunities for future work, such as using data collected during authentication for calibrating eye trackers.
@InProceedings{abdrabou2020etra,
author = {Yasmeen Abdrabou AND Ken Pfeuffer AND Mohamed Khamis AND Florian Alt},
booktitle = {{Proceedings of the 2020 ACM Symposium on Eye Tracking Research \& Applications}},
title = {{GazeLockPatterns: Comparing Authentication Using Gaze and Touch for Entering Lock Patterns}},
year = {2020},
address = {New York, NY, USA},
note = {abdrabou2020etra},
publisher = {Association for Computing Machinery},
series = {ETRA '20},
abstract = {In this work, we present a comparison between Android's lock patterns for mobile devices (TouchLockPatterns) and an implementation of lock patterns that uses gaze input (GazeLockPatterns). We report on results of a between subjects study (N=40) to show that for the same layout of authentication interface, people employ comparable strategies for pattern composition. We discuss the pros and cons of adapting lock patterns to gaze-based user interfaces. We conclude with opportunities for future work, such as using data collected during authentication for calibrating eye trackers.},
doi = {10.1145/3379156.3391371},
isbn = {978-1-4503-7134-6},
keywords = {eye-tracking, calibration, eye-tracker, smooth pursuit, eye movement},
location = {Stuttgart, Germany},
numpages = {7},
owner = {florian},
timestamp = {2020.06.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/abdrabou2020etra.pdf},
video = {abdrabou2020etra},
}
R. Rivu, Y. Abdrabou, K. Pfeuffer, A. Esteves, S. Meitner, and F. Alt. StARe: Gaze-Assisted Face-to-Face Communication in Augmented Reality. In Proceedings of the 2020 ACM Symposium on Eye Tracking Research & Applications (COGAIN ’20), Association for Computing Machinery, New York, NY, USA, 2020. doi:10.1145/3379157.3388930
[BibTeX] [Abstract] [PDF]
This research explores the use of eye-tracking during Augmented Reality (AR)-supported conversations. In this scenario, users can obtain information that supports the conversation, without augmentations distracting from the actual conversation. We propose using gaze to allow users to gradually reveal information on demand. Information is indicated around the user’s head and becomes fully visible when the other person’s visual attention explicitly falls upon the area. We describe the design of such an AR UI and present an evaluation of the feasibility of the concept. Results show that despite gaze inaccuracies, users were positive about augmenting their conversations with contextual information and gaze interactivity. We provide insights into the trade-offs between focusing on the task at hand (i.e., the conversation) and consuming AR information. These findings are useful for future use cases of eye-based AR interactions by contributing to a better understanding of the intricate balance between informative AR and information overload.
@InProceedings{rivu2020cogain,
author = {Rivu, Radiah and Abdrabou, Yasmeen and Pfeuffer, Ken and Esteves, Augusto and Meitner, Stefanie and Alt, Florian},
booktitle = {{Proceedings of the 2020 ACM Symposium on Eye Tracking Research \& Applications}},
title = {{StARe: Gaze-Assisted Face-to-Face Communication in Augmented Reality}},
year = {2020},
address = {New York, NY, USA},
note = {rivu2020cogain},
publisher = {Association for Computing Machinery},
series = {COGAIN '20},
abstract = {This research explores the use of eye-tracking during Augmented Reality (AR)-supported conversations. In this scenario, users can obtain information that supports the conversation, without augmentations distracting from the actual conversation. We propose using gaze to allow users to gradually reveal information on demand. Information is indicated around the user's head and becomes fully visible when the other person's visual attention explicitly falls upon the area. We describe the design of such an AR UI and present an evaluation of the feasibility of the concept. Results show that despite gaze inaccuracies, users were positive about augmenting their conversations with contextual information and gaze interactivity. We provide insights into the trade-offs between focusing on the task at hand (i.e., the conversation) and consuming AR information. These findings are useful for future use cases of eye-based AR interactions by contributing to a better understanding of the intricate balance between informative AR and information overload.},
articleno = {14},
doi = {10.1145/3379157.3388930},
isbn = {9781450371353},
keywords = {Assistive Conversation, Eye-tracking, AR, Gaze Interaction},
location = {Stuttgart, Germany},
numpages = {5},
timestamp = {2020.06.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/rivu2020cogain.pdf},
}
T. Kosch, M. Hassib, R. Reutter, and F. Alt. Emotions on the Go: Mobile Emotion Assessment in Real-Time Using Facial Expressions. In Proceedings of the International Conference on Advanced Visual Interfaces (AVI ’20), Association for Computing Machinery, New York, NY, USA, 2020. doi:10.1145/3399715.3399928
[BibTeX] [Abstract] [PDF]
Exploiting emotions for user interface evaluation has become an increasingly important research objective in Human-Computer Interaction. Emotions are usually assessed through surveys that do not allow information to be collected in real time. In our work, we suggest the use of smartphones for mobile emotion assessment. We use the front-facing smartphone camera as a tool for emotion detection based on facial expressions. Such information can be used to reflect on emotional states or provide emotion-aware user interface adaptation. We collected facial expressions along with app usage data in a two-week field study consisting of a one-week training phase and a one-week testing phase. We built and evaluated a person-dependent classifier, yielding an average classification improvement of 33% compared to classifying facial expressions only. Furthermore, we correlate the estimated emotions with concurrent app usage to draw insights into changes in mood. Our work is complemented by a discussion of the feasibility of probing emotions on-the-go and potential use cases for future emotion-aware applications.
@InProceedings{kosch2020avi,
author = {Kosch, Thomas and Hassib, Mariam and Reutter, Robin and Alt, Florian},
booktitle = {{Proceedings of the International Conference on Advanced Visual Interfaces}},
title = {{Emotions on the Go: Mobile Emotion Assessment in Real-Time Using Facial Expressions}},
year = {2020},
address = {New York, NY, USA},
note = {kosch2020avi},
publisher = {Association for Computing Machinery},
series = {AVI '20},
abstract = {Exploiting emotions for user interface evaluation has become an increasingly important research objective in Human-Computer Interaction. Emotions are usually assessed through surveys that do not allow information to be collected in real time. In our work, we suggest the use of smartphones for mobile emotion assessment. We use the front-facing smartphone camera as a tool for emotion detection based on facial expressions. Such information can be used to reflect on emotional states or provide emotion-aware user interface adaptation. We collected facial expressions along with app usage data in a two-week field study consisting of a one-week training phase and a one-week testing phase. We built and evaluated a person-dependent classifier, yielding an average classification improvement of 33% compared to classifying facial expressions only. Furthermore, we correlate the estimated emotions with concurrent app usage to draw insights into changes in mood. Our work is complemented by a discussion of the feasibility of probing emotions on-the-go and potential use cases for future emotion-aware applications.},
articleno = {18},
doi = {10.1145/3399715.3399928},
isbn = {9781450375351},
keywords = {Affective Computing, Emotion Recognition, Emotion-Aware Interfaces, Mobile Sensing},
location = {Salerno, Italy},
numpages = {9},
timestamp = {2020.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/kosch2020avi.pdf},
}
S. Prange, L. Mecke, A. Nguyen, M. Khamis, and F. Alt. Don’t Use Fingerprint, it’s Raining! How People Use and Perceive Context-Aware Selection of Mobile Authentication. In Proceedings of the International Conference on Advanced Visual Interfaces (AVI ’20), Association for Computing Machinery, New York, NY, USA, 2020. doi:10.1145/3399715.3399823
[BibTeX] [Abstract] [PDF]
This paper investigates how smartphone users perceive switching from their primary authentication mechanism to a fallback one, based on the context. This is useful in cases where the primary mechanism fails (e.g., wet fingers when using fingerprint). While prior work introduced the concept, we are the first to investigate its perception by users and their willingness to follow a system’s suggestion for a switch. We present findings from a two-week field study (N=29) using an Android app, showing that users are willing to adopt alternative mechanisms when prompted. We discuss how context-awareness can improve the perception of authentication reliability and potentially improve usability and security.
@InProceedings{prange2020avi,
author = {Sarah Prange AND Lukas Mecke AND Alice Nguyen AND Mohamed Khamis AND Florian Alt},
booktitle = {{Proceedings of the International Conference on Advanced Visual Interfaces}},
title = {{Don't Use Fingerprint, it's Raining! How People Use and Perceive Context-Aware Selection of Mobile Authentication}},
year = {2020},
address = {New York, NY, USA},
note = {prange2020avi},
publisher = {Association for Computing Machinery},
series = {AVI '20},
abstract = {This paper investigates how smartphone users perceive switching from their primary authentication mechanism to a fallback one, based on the context. This is useful in cases where the primary mechanism fails (e.g., wet fingers when using fingerprint). While prior work introduced the concept, we are the first to investigate its perception by users and their willingness to follow a system's suggestion for a switch. We present findings from a two-week field study (N=29) using an Android app, showing that users are willing to adopt alternative mechanisms when prompted. We discuss how context-awareness can improve the perception of authentication reliability and potentially improve usability and security.},
articleno = {54},
doi = {10.1145/3399715.3399823},
isbn = {9781450375351},
keywords = {Android, Fingerprint, Field Study, Context-Aware Authentication, Biometrics, User Perception, Mobile Devices},
location = {Salerno, Italy},
numpages = {5},
timestamp = {2020.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/prange2020avi.pdf},
}
M. Rittenbruch, R. Schroeter, F. Wirth, and F. Alt. An Exploratory Physical Computing Toolkit for Rapid Exploration and Co-Design of On-Bicycle Notification Interfaces. In Proceedings of the 2020 ACM Designing Interactive Systems Conference (DIS’20), Association for Computing Machinery, New York, NY, USA, 2020, p. 873–884. doi:10.1145/3357236.3395534
[BibTeX] [Abstract] [PDF]
Cycling offers significant health and environmental benefits, but safety remains a critical issue. We need better tools and design processes to develop on-bicycle notification interfaces, for example, for hazard warnings, and to overcome design challenges associated with the cycling context. We present a physical computing toolkit that supports the rapid exploration and co-design of on-bicycle interfaces. Physical plug-and-play interaction modules controlled by an orchestration interface allow participants to explore different tangible and ambient interaction approaches on a budget cycling simulator. The toolkit was assessed by analysing video recordings of two group design workshops (N=8) and twelve individual design sessions (N=12). Our results show that the toolkit enabled flexible transitions between ideation and out-of-the-box thinking, prototyping, and immediate evaluation. We offer insights on how to design physical computing toolkits that offer low-cost, ‘good enough’ simulation while allowing for free and safe exploration of on-bicycle notification interfaces.
@InProceedings{rittenbruch2020dis,
author = {Rittenbruch, Markus and Schroeter, Ronald and Wirth, Florian and Alt, Florian},
booktitle = {{Proceedings of the 2020 ACM Designing Interactive Systems Conference}},
title = {{An Exploratory Physical Computing Toolkit for Rapid Exploration and Co-Design of On-Bicycle Notification Interfaces}},
year = {2020},
address = {New York, NY, USA},
note = {rittenbruch2020dis},
pages = {873–884},
publisher = {Association for Computing Machinery},
series = {DIS'20},
abstract = {Cycling offers significant health and environmental benefits, but safety remains a
critical issue. We need better tools and design processes to develop on-bicycle notification
interfaces, for example, for hazard warnings, and to overcome design challenges associated
with the cycling context. We present a physical computing toolkit that supports the
rapid exploration and co-design of on-bicycle interfaces. Physical plug-and-play interaction
modules controlled by an orchestration interface allow participants to explore different
tangible and ambient interaction approaches on a budget cycling simulator. The toolkit
was assessed by analysing video recordings of two group design workshops (N=8) and
twelve individual design sessions (N=12). Our results show that the toolkit enabled
flexible transitions between ideation and out-of-the-box thinking, prototyping, and
immediate evaluation. We offer insights on how to design physical computing toolkits
that offer low-cost, 'good enough' simulation while allowing for free and safe exploration
of on-bicycle notification interfaces.},
doi = {10.1145/3357236.3395534},
isbn = {9781450369749},
numpages = {12},
timestamp = {2020.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/rittenbruch2020dis.pdf},
}
C. Katsini, Y. Abdrabou, G. E. Raptis, M. Khamis, and F. Alt. The Role of Eye Gaze in Security and Privacy Applications: Survey and Future HCI Research Directions. In Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems (CHI ’20), Association for Computing Machinery, New York, NY, USA, 2020. doi:10.1145/3313831.3376840
[BibTeX] [Abstract] [PDF] [Video] [Talk]
For the past 20 years, researchers have investigated the use of eye tracking in security applications. We present a holistic view on gaze-based security applications. In particular, we canvassed the literature and classify the utility of gaze in security applications into a) authentication, b) privacy protection, and c) gaze monitoring during security critical tasks. This allows us to chart several research directions, most importantly 1) conducting field studies of implicit and explicit gaze-based authentication due to recent advances in eye tracking, 2) research on gaze-based privacy protection and gaze monitoring in security critical tasks, which are under-investigated yet very promising areas, and 3) understanding the privacy implications of pervasive eye tracking. We discuss the most promising opportunities and most pressing challenges of eye tracking for security that will shape research in gaze-based security applications for the next decade.
@InProceedings{katsini2020chi,
author = {Christina Katsini AND Yasmeen Abdrabou AND George E. Raptis AND Mohamed Khamis AND Florian Alt},
booktitle = {{Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems}},
title = {{The Role of Eye Gaze in Security and Privacy Applications: Survey and Future HCI Research Directions}},
year = {2020},
address = {New York, NY, USA},
note = {katsini2020chi},
publisher = {Association for Computing Machinery},
series = {CHI ’20},
abstract = {For the past 20 years, researchers have investigated the use of eye tracking in security applications. We present a holistic view on gaze-based security applications. In particular, we canvassed the literature and classify the utility of gaze in security applications into a) authentication, b) privacy protection, and c) gaze monitoring during security critical tasks. This allows us to chart several research directions, most importantly 1) conducting field studies of implicit and explicit gaze-based authentication due to recent advances in eye tracking, 2) research on gaze-based privacy protection and gaze monitoring in security critical tasks, which are under-investigated yet very promising areas, and 3) understanding the privacy implications of pervasive eye tracking. We discuss the most promising opportunities and most pressing challenges of eye tracking for security that will shape research in gaze-based security applications for the next decade.},
doi = {10.1145/3313831.3376840},
isbn = {9781450367080},
keywords = {Eye tracking, Gaze Interaction, Security, Privacy, Survey},
location = {Honolulu, HI, US},
talk = {https://www.youtube.com/watch?v=U5r2qIGw42k},
timestamp = {2020.05.03},
url = {http://florian-alt.org/unibw/wp-content/publications/katsini2020chi.pdf},
video = {katsini2020chi},
}
V. Mäkelä, R. Radiah, S. Alsherif, M. Khamis, C. Xiao, L. Borchert, A. Schmidt, and F. Alt. Virtual Field Studies: Conducting Studies on Public Displays in Virtual Reality. In Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems (CHI ’20), Association for Computing Machinery, New York, NY, USA, 2020. doi:10.1145/3313831.3376796
[BibTeX] [PDF] [Talk]
@InProceedings{makela2020chi,
author = {Ville Mäkelä AND Rivu Radiah AND Saleh Alsherif AND Mohamed Khamis AND Chong Xiao AND Lisa Borchert AND Albrecht Schmidt AND Florian Alt},
booktitle = {{Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems}},
title = {{Virtual Field Studies: Conducting Studies on Public Displays in Virtual Reality}},
year = {2020},
address = {New York, NY, USA},
note = {makela2020chi},
publisher = {Association for Computing Machinery},
series = {CHI '20},
doi = {10.1145/3313831.3376796},
isbn = {9781450367080},
keywords = {Virtual reality, field studies, public displays, research methods},
location = {Honolulu, HI, US},
talk = {https://www.youtube.com/watch?v=DVXWuVx4HtY},
timestamp = {2020.05.03},
url = {http://florian-alt.org/unibw/wp-content/publications/makela2020chi.pdf},
}
S. Prange and F. Alt. I Wish You Were Smart(er): Investigating Users’ Desires and Needs Towards Home Appliances. In Extended Abstracts of the 2020 CHI Conference on Human Factors in Computing Systems (CHI EA ’20), Association for Computing Machinery, New York, NY, USA, 2020, p. 1–8. doi:10.1145/3334480.3382910
[BibTeX] [Abstract] [PDF]
In this work, we present findings from an online survey (N=77) in which we assessed situations of users wishing for features or devices in their home to be smart(er). Our work is motivated by the fact that on one hand, several successful smart devices and features found their way into users’ homes (e.g., smart TVs, smart assistants, smart toothbrushes). On the other hand, a more holistic understanding of when and why users would like devices and features to be smart is missing as of today. Such knowledge is valuable for researchers and practitioners to inform the design of future smart home devices and features, in particular with regards to interaction techniques, privacy mechanisms, and, ultimately, acceptance and uptake. We found that users would appreciate smart features for various use cases, including remote control and multi-tasking, and are willing to share devices. We believe our work to be useful for designers and HCI researchers by supporting the design and evaluation of future smart devices.
@InProceedings{prange2020chiea,
author = {Prange, Sarah and Alt, Florian},
booktitle = {{Extended Abstracts of the 2020 CHI Conference on Human Factors in Computing Systems}},
title = {{I Wish You Were Smart(er): Investigating Users' Desires and Needs Towards Home Appliances}},
year = {2020},
address = {New York, NY, USA},
note = {prange2020chiea},
pages = {1–8},
publisher = {Association for Computing Machinery},
series = {CHI EA '20},
abstract = {In this work, we present findings from an online survey (N=77) in which we assessed situations of users wishing for features or devices in their home to be smart(er). Our work is motivated by the fact that on one hand, several successful smart devices and features found their way into users' homes (e.g., smart TVs, smart assistants, smart toothbrushes). On the other hand, a more holistic understanding of when and why users would like devices and features to be smart is missing as of today. Such knowledge is valuable for researchers and practitioners to inform the design of future smart home devices and features, in particular with regards to interaction techniques, privacy mechanisms, and, ultimately, acceptance and uptake. We found that users would appreciate smart features for various use cases, including remote control and multi-tasking, and are willing to share devices. We believe our work to be useful for designers and HCI researchers by supporting the design and evaluation of future smart devices.},
doi = {10.1145/3334480.3382910},
isbn = {9781450368193},
keywords = {smart devices, smart homes, online survey},
location = {Honolulu, HI, USA},
numpages = {8},
timestamp = {2020.05.02},
url = {http://florian-alt.org/unibw/wp-content/publications/prange2020chiea.pdf},
}
R. Rivu, Y. Abdrabou, K. Pfeuffer, M. Hassib, and F. Alt. Gaze’N’Touch: Enhancing Text Selection on Mobile Devices Using Gaze. In Extended Abstracts of the 2020 CHI Conference on Human Factors in Computing Systems (CHI EA ’20), Association for Computing Machinery, New York, NY, USA, 2020, p. 1–8. doi:10.1145/3334480.3382802
[BibTeX] [Abstract] [PDF]
Text selection is a frequent task we perform every day to edit, modify, or delete text. Selecting a word requires not only precision but also switching between selecting and typing, which influences both speed and error rates. In this paper, we evaluate a novel concept that extends text editing with an additional modality: gaze. We present a user study (N=16) in which we explore how the novel concept, called GazeButton, can improve text selection by comparing it to touch-based selection. In addition, we tested the effect of text size on the selection techniques by comparing two different text sizes. Results show that gaze-based selection was faster with the bigger text size, although not statistically significantly so. Qualitative feedback shows a preference for gaze over touch, which motivates a new direction of gaze usage in text editors.
@InProceedings{rivu2020chiea,
author = {Radiah Rivu AND Yasmeen Abdrabou AND Ken Pfeuffer AND Mariam Hassib AND Florian Alt},
booktitle = {{Extended Abstracts of the 2020 CHI Conference on Human Factors in Computing Systems}},
title = {{Gaze’N’Touch: Enhancing Text Selection on Mobile Devices Using Gaze}},
year = {2020},
address = {New York, NY, USA},
note = {rivu2020chiea},
pages = {1–8},
publisher = {Association for Computing Machinery},
series = {CHI EA '20},
abstract = {Text selection is a frequent task we perform every day to edit, modify, or delete text. Selecting a word requires not only precision but also switching between selecting and typing, which influences both speed and error rates. In this paper, we evaluate a novel concept that extends text editing with an additional modality: gaze. We present a user study (N=16) in which we explore how the novel concept, called GazeButton, can improve text selection by comparing it to touch-based selection. In addition, we tested the effect of text size on the selection techniques by comparing two different text sizes. Results show that gaze-based selection was faster with the bigger text size, although not statistically significantly so. Qualitative feedback shows a preference for gaze over touch, which motivates a new direction of gaze usage in text editors.},
doi = {10.1145/3334480.3382802},
isbn = {9781450368193},
keywords = {gaze and touch, interaction, text editing, gaze selection},
location = {Honolulu, HI, USA},
numpages = {8},
timestamp = {2020.05.02},
url = {http://florian-alt.org/unibw/wp-content/publications/rivu2020chiea.pdf},
}
S. Schneegass, A. Sasse, F. Alt, and D. Vogel. Authentication Beyond Desktops and Smartphones: Novel Approaches for Smart Devices and Environments. In Extended Abstracts of the 2020 CHI Conference on Human Factors in Computing Systems (CHI EA ’20), Association for Computing Machinery, New York, NY, USA, 2020, p. 1–6. doi:10.1145/3334480.3375144
[BibTeX] [Abstract] [PDF]
Much of the research on authentication in the past decades focused on developing authentication mechanisms for desktop computers and smartphones with the goal of making them both secure and usable. At the same time, the increasing number of smart devices that are becoming part of our everyday life creates new challenges for authentication, in particular since many of those devices are not designed and developed with authentication in mind. Examples include but are not limited to wearables, AR and VR glasses, devices in smart homes, and public displays. The goal of this workshop is to develop a common understanding of challenges and opportunities smart devices and environments create for secure and usable authentication. Therefore, we will bring together researchers and practitioners from HCI, usable security, and specific application areas (e.g., smart homes, wearables) to develop a research agenda for future approaches to authentication.
@inproceedings{schneegass2020chiea,
author = {Schneegass, Stefan and Sasse, Angela and Alt, Florian and Vogel, Daniel},
title = {Authentication Beyond Desktops and Smartphones: Novel Approaches for Smart Devices and Environments},
year = {2020},
isbn = {9781450368193},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {http://florian-alt.org/unibw/wp-content/publications/schneegass2020chiea.pdf},
doi = {10.1145/3334480.3375144},
abstract = {Much of the research on authentication in the past decades focused on developing authentication mechanisms for desktop computers and smartphones with the goal of making them both secure and usable. At the same time, the increasing number of smart devices that are becoming part of our everyday life creates new challenges for authentication, in particular since many of those devices are not designed and developed with authentication in mind. Examples include but are not limited to wearables, AR and VR glasses, devices in smart homes, and public displays. The goal of this workshop is to develop a common understanding of challenges and opportunities smart devices and environments create for secure and usable authentication. Therefore, we will bring together researchers and practitioners from HCI, usable security, and specific application areas (e.g., smart homes, wearables) to develop a research agenda for future approaches to authentication.},
booktitle = {Extended Abstracts of the 2020 CHI Conference on Human Factors in Computing Systems},
pages = {1–6},
numpages = {6},
keywords = {authentication, ubicomp, smart environments},
location = {Honolulu, HI, USA},
series = {CHI EA '20},
timestamp = {2020.05.01},
note = {schneegass2020chiea},
}
Y. Abdrabou, S. Prange, L. Mecke, K. Pfeuffer, and F. Alt. VolumePatterns: Using Hardware Buttons beyond Volume Control on Mobile Devices. In Proceedings of the 1st CHI Workshop on Authentication Beyond Desktops and Smartphones (WABDS’20), 2020.
[BibTeX] [PDF]
@InProceedings{abdrabou2020wabds,
author = {Yasmeen Abdrabou AND Sarah Prange AND Lukas Mecke AND Ken Pfeuffer AND Florian Alt},
booktitle = {{Proceedings of the 1st CHI Workshop on Authentication Beyond Desktops and Smartphones}},
title = {{VolumePatterns: Using Hardware Buttons beyond Volume Control on Mobile Devices}},
year = {2020},
note = {abdrabou2020wabds},
series = {WABDS'20},
timestamp = {2020.04.27},
url = {http://florian-alt.org/unibw/wp-content/publications/abdrabou2020wabds.pdf},
}
S. Prange and F. Alt. Interact2Authenticate: Towards Usable Authentication in Smart Environments. In Proceedings of the 1st CHI Workshop on Authentication Beyond Desktops and Smartphones (WABDS’20), 2020.
[BibTeX] [PDF]
@InProceedings{prange2020wabds,
author = {Sarah Prange AND Florian Alt},
booktitle = {{Proceedings of the 1st CHI Workshop on Authentication Beyond Desktops and Smartphones}},
title = {{Interact2Authenticate: Towards Usable Authentication in Smart Environments}},
year = {2020},
note = {prange2020wabds},
series = {WABDS'20},
timestamp = {2020.04.27},
url = {http://florian-alt.org/unibw/wp-content/publications/prange2020wabds.pdf},
}
R. Radiah, V. Mäkelä, M. Hassib, and F. Alt. Understanding Emotions in Virtual Reality. In Proceedings of the 1st CHI Workshop on Momentary Emotion Elicitation and Capture (MEEC’20), 2020.
[BibTeX] [PDF]
@InProceedings{rivu2020meec,
author = {Rivu Radiah AND Ville Mäkelä AND Mariam Hassib AND Florian Alt},
booktitle = {{Proceedings of the 1st CHI Workshop on Momentary Emotion Elicitation and Capture}},
title = {{Understanding Emotions in Virtual Reality}},
year = {2020},
note = {rivu2020meec},
series = {MEEC'20},
timestamp = {2020.04.27},
url = {http://florian-alt.org/unibw/wp-content/publications/rivu2020meec.pdf},
}
A. Saad, S. D. Rodriguez, R. Heger, F. Alt, and S. Schneegass. Understanding User-Centered Attacks In-The-Wild. In Proceedings of the 1st CHI Workshop on Authentication Beyond Desktops and Smartphones (WABDS’20), 2020.
[BibTeX] [PDF]
@InProceedings{saad2020wabds,
author = {Alia Saad AND Sarah Delgado Rodriguez AND Roman Heger AND Florian Alt AND Stefan Schneegass},
booktitle = {{Proceedings of the 1st CHI Workshop on Authentication Beyond Desktops and Smartphones}},
title = {{Understanding User-Centered Attacks In-The-Wild}},
year = {2020},
note = {saad2020wabds},
series = {WABDS'20},
timestamp = {2020.04.27},
url = {http://florian-alt.org/unibw/wp-content/publications/saad2020wabds.pdf},
}
A. Colley, B. Pfleging, F. Alt, and J. Häkkilä. Exploring Public Wearable Display of Wellness Tracker Data. International Journal of Human-Computer Studies, 2020. doi:10.1016/j.ijhcs.2020.102408
[BibTeX] [Abstract] [PDF]
We investigate wearable presentation of tracked wellness data, and people’s perceptions and motivations for sharing it through a wearable display. Whilst online sharing is a common feature in wellness tracking solutions, the motivations and experiences of users to share tracked data in situ has not been widely studied. We created two functional prototypes – the hat tracker and the tracker badge – which we used as probes in two focus groups to elicit opinions on the content and format of wearable tracker displays. Complementing this, a study where participants used the hat tracker prototype in public locations provides insights on sharing in everyday life use contexts. We report that users appreciate the motivating nature of such displays, but favor the display of positive information. Leveraging prior work, we present a model describing the factors affecting users’ willingness to share tracked data via wearable displays, and highlight such displays’ potential for supporting behavior change.
@Article{colley2020ijhcs,
author = {Ashley Colley and Bastian Pfleging and Florian Alt and Jonna Häkkilä},
journal = {{International Journal of Human-Computer Studies}},
title = {{Exploring Public Wearable Display of Wellness Tracker Data}},
year = {2020},
issn = {1071-5819},
month = jan,
note = {colley2020ijhcs},
abstract = {We investigate wearable presentation of tracked wellness data, and people’s perceptions and motivations for sharing it through a wearable display. Whilst online sharing is a common feature in wellness tracking solutions, the motivations and experiences of users to share tracked data in situ has not been widely studied. We created two functional prototypes – the hat tracker and the tracker badge – which we used as probes in two focus groups to elicit opinions on the content and format of wearable tracker displays. Complementing this, a study where participants used the hat tracker prototype in public locations provides insights on sharing in everyday life use contexts. We report that users appreciate the motivating nature of such displays, but favor the display of positive information. Leveraging prior work, we present a model describing the factors affecting users’ willingness to share tracked data via wearable displays, and highlight such displays’ potential for supporting behavior change.},
doi = {10.1016/j.ijhcs.2020.102408},
timestamp = {2020.01.29},
url = {http://florian-alt.org/unibw/wp-content/publications/colley2020ijhcs.pdf},
}
M. Braun and F. Alt, “Identifying Personality Dimensions for Digital Agents,” in Character Computing, 1st ed., A. El Bolock, Y. Abdelrahman, and S. Abdennadher, Eds., Springer International Publishing, 2020, p. 15. doi:10.1007/978-3-030-15954-2
[BibTeX] [PDF]
@InBook{braun2020springer,
author = {Michael Braun AND Florian Alt},
chapter = {{Identifying Personality Dimensions for Digital Agents}},
editor = {Alia El Bolock AND Yomna Abdelrahman AND Slim Abdennadher},
pages = {15},
publisher = {Springer International Publishing},
title = {{Character Computing}},
year = {2020},
edition = {1},
isbn = {978-3-030-15954-2},
note = {braun2020springer},
series = {Human-Computer Interaction Series},
booktitle = {Character Computing},
doi = {10.1007/978-3-030-15954-2},
owner = {florian},
timestamp = {2020.01.28},
url = {http://florian-alt.org/unibw/wp-content/publications/braun2020springer.pdf},
}
K. Pfeuffer, L. Mecke, S. Delgado Rodriguez, M. Hassib, H. Maier, and F. Alt. Empirical evaluation of gaze-enhanced menus in virtual reality. In Proceedings of the 26th acm symposium on virtual reality software and technology (VRST ’20), Association for Computing Machinery, New York, NY, USA, 2020. doi:10.1145/3385956.3418962
[BibTeX] [Abstract] [PDF]
Many user interfaces involve attention shifts between primary and secondary tasks, e.g., when changing a mode in a menu, which distracts the user from their main task. In this work, we investigate how eye gaze input affords exploiting these attention shifts to enhance the interaction with handheld menus. We assess three techniques for menu selection: dwell time, gaze button, and cursor. Each represents a different multimodal balance between gaze and manual input. We present a user study that compares the techniques against two manual baselines (dunk brush, pointer) in a compound colour selection and line drawing task. We show that user performance with the gaze techniques is comparable to pointer-based menu selection, with less physical effort. Furthermore, we provide an analysis of the trade-off as each technique strives for a unique balance between temporal, manual, and visual interaction properties. Our research points to new opportunities for integrating multimodal gaze in menus and bimanual interfaces in 3D environments.
@InProceedings{pfeuffer2020vrst,
author = {Pfeuffer, Ken and Mecke, Lukas and Delgado Rodriguez, Sarah and Hassib, Mariam and Maier, Hannah and Alt, Florian},
booktitle = {Proceedings of the 26th ACM Symposium on Virtual Reality Software and Technology},
title = {Empirical Evaluation of Gaze-Enhanced Menus in Virtual Reality},
year = {2020},
address = {New York, NY, USA},
note = {pfeuffer2020vrst},
publisher = {Association for Computing Machinery},
series = {VRST '20},
abstract = {Many user interfaces involve attention shifts between primary and secondary tasks, e.g., when changing a mode in a menu, which distracts the user from their main task. In this work, we investigate how eye gaze input affords exploiting these attention shifts to enhance the interaction with handheld menus. We assess three techniques for menu selection: dwell time, gaze button, and cursor. Each represents a different multimodal balance between gaze and manual input. We present a user study that compares the techniques against two manual baselines (dunk brush, pointer) in a compound colour selection and line drawing task. We show that user performance with the gaze techniques is comparable to pointer-based menu selection, with less physical effort. Furthermore, we provide an analysis of the trade-off as each technique strives for a unique balance between temporal, manual, and visual interaction properties. Our research points to new opportunities for integrating multimodal gaze in menus and bimanual interfaces in 3D environments.},
articleno = {20},
doi = {10.1145/3385956.3418962},
isbn = {9781450376198},
keywords = {Gaze, Manual input, Virtual Reality, Design, Menu, Pointing},
location = {Virtual Event, Canada},
numpages = {11},
timestamp = {2020.12.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/pfeuffer2020vrst.pdf},
}
Y. Abdrabou, F. Alt, C. Katsini, M. Khamis, and G. Raptis. EyeSec: Eye-Gaze for Security Applications. In ACM Symposium on Eye Tracking Research and Applications (ETRA ’21 Short Papers), Association for Computing Machinery, New York, NY, USA, 2020.
[BibTeX] [PDF]
@InProceedings{abdrabou2020etraws,
author = {Yasmeen Abdrabou AND Florian Alt AND Christina Katsini AND Mohamed Khamis AND George Raptis},
booktitle = {ACM Symposium on Eye Tracking Research and Applications},
title = {EyeSec: Eye-Gaze for Security Applications},
year = {2020},
note={abdrabou2020etraws},
address = {New York, NY, USA},
publisher = {Association for Computing Machinery},
series = {ETRA '21 Short Papers},
isbn = {9781450383455},
location = {Virtual Event, Germany},
timestamp = {2020.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/abdrabou2020etraws.pdf},
}

2019

F. Alt and E. von Zezschwitz. Emerging Trends in Usable Security and Privacy. Journal of Interactive Media (icom), vol. 18, iss. 3, 2019. doi:10.1515/icom-2019-0019
[BibTeX] [PDF]
@Article{alt2019icom,
author = {Florian Alt AND Emanuel von Zezschwitz},
journal = {{Journal of Interactive Media (icom)}},
title = {{Emerging Trends in Usable Security and Privacy}},
year = {2019},
month = dec,
note = {alt2019icom},
number = {3},
volume = {18},
doi = {10.1515/icom-2019-0019},
owner = {florian},
timestamp = {2019.12.31},
url = {http://florian-alt.org/unibw/wp-content/publications/alt2019icom.pdf},
}
F. Alt and E. von Zezschwitz. Special Issue: Emerging Trends in Usable Security and Privacy. Journal of Interactive Media (icom), vol. 18, iss. 3, 2019.
[BibTeX] [PDF]
@Periodical{alt2019icomsi,
title = {{Special Issue: Emerging Trends in Usable Security and Privacy}},
year = {2019},
editor = {Florian Alt AND Emanuel von Zezschwitz},
language = {German},
series = {Journal of Interactive Media},
volume = {18},
number = {3},
organization = {De Gruyter},
month = dec,
note = {alt2019icomsi},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2019icomsi.pdf},
author = {Florian Alt AND Emanuel von Zezschwitz},
journal = {Journal of Interactive Media (icom)},
owner = {florian},
timestamp = {2019.12.30},
}
S. Faltaous, J. Liebers, Y. Abdelrahman, F. Alt, and S. Schneegass. VPID: Towards Vein Pattern Identification Using Thermal Imaging. Journal of Interactive Media (icom), vol. 18, iss. 3, 2019. doi:10.1515/icom-2019-0009
[BibTeX] [PDF]
@Article{faltaous2019icom,
author = {Sarah Faltaous AND Jonathan Liebers AND Yomna Abdelrahman AND Florian Alt AND Stefan Schneegass},
journal = {{Journal of Interactive Media (icom)}},
title = {{VPID: Towards Vein Pattern Identification Using Thermal Imaging}},
year = {2019},
issn = {1618-162X},
month = dec,
note = {faltaous2019icom},
number = {3},
volume = {18},
doi = {10.1515/icom-2019-0009},
owner = {florian},
timestamp = {2019.12.29},
url = {http://florian-alt.org/unibw/wp-content/publications/faltaous2019icom.pdf},
}
H. Drewes, M. Khamis, and F. Alt. DialPlates: Enabling Pursuits-based User Interfaces with Large Target Numbers. In Proceedings of the 18th International Conference on Mobile and Ubiquitous Multimedia (MUM’19), Association for Computing Machinery, New York, NY, USA, 2019. doi:10.1145/3365610.3365626
[BibTeX] [Abstract] [PDF]
In this paper we introduce a novel approach for smooth pursuits eye movement detection and demonstrate that it allows up to 160 targets to be distinguished. With this work we advance the well-established smooth pursuits technique, which allows gaze interaction without calibration. The approach is valuable for researchers and practitioners, since it enables novel user interfaces and applications to be created that employ a large number of targets, for example, a pursuits-based keyboard or a smart home where many different objects can be controlled using gaze. We present findings from two studies. In particular, we compare our novel detection algorithm based on linear regression with the correlation method. We quantify its accuracy for around 20 targets on a single circle and up to 160 targets on multiple circles. Finally, we implemented a pursuits-based keyboard app with 108 targets as proof-of-concept.
@InProceedings{drewes2019mum,
author = {Drewes, Heiko and Khamis, Mohamed and Alt, Florian},
booktitle = {{Proceedings of the 18th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{DialPlates: Enabling Pursuits-based User Interfaces with Large Target Numbers}},
year = {2019},
address = {New York, NY, USA},
note = {drewes2019mum},
publisher = {Association for Computing Machinery},
series = {MUM'19},
abstract = {In this paper we introduce a novel approach for smooth pursuits eye movement detection and demonstrate that it allows up to 160 targets to be distinguished. With this work we advance the well-established smooth pursuits technique, which allows gaze interaction without calibration. The approach is valuable for researchers and practitioners, since it enables novel user interfaces and applications to be created that employ a large number of targets, for example, a pursuits-based keyboard or a smart home where many different objects can be controlled using gaze. We present findings from two studies. In particular, we compare our novel detection algorithm based on linear regression with the correlation method. We quantify its accuracy for around 20 targets on a single circle and up to 160 targets on multiple circles. Finally, we implemented a pursuits-based keyboard app with 108 targets as proof-of-concept.},
doi = {10.1145/3365610.3365626},
location = {Pisa, Italy},
timestamp = {2019.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/drewes2019mum.pdf},
}
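As context for the comparison above: the classical correlation method matches the recent gaze trajectory against each target's known on-screen trajectory and selects the best-correlated target. The following is a minimal Python sketch of that baseline, not the authors' code; array shapes, names, and the threshold are illustrative assumptions.

import numpy as np

def detect_pursuit_target(gaze_xy, target_trajs, threshold=0.8):
    # gaze_xy: (n, 2) array of recent gaze samples (x, y).
    # target_trajs: dict of target id -> (n, 2) array of that target's
    # on-screen positions over the same time window.
    best_id, best_score = None, threshold
    for tid, traj in target_trajs.items():
        # Pearson correlation between gaze and target motion, per axis.
        rx = np.corrcoef(gaze_xy[:, 0], traj[:, 0])[0, 1]
        ry = np.corrcoef(gaze_xy[:, 1], traj[:, 1])[0, 1]
        score = min(rx, ry)  # require the gaze to follow on both axes
        if score > best_score:
            best_id, best_score = tid, score
    return best_id  # None if no target correlates above the threshold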
Y. Abdelrahman, P. Woźniak, P. Knierim, D. Weber, K. Pfeuffer, N. Henze, A. Schmidt, and F. Alt. Exploring the Domestication of Thermal Imaging. In Proceedings of the 18th International Conference on Mobile and Ubiquitous Multimedia (MUM’19), Association for Computing Machinery, New York, NY, USA, 2019. doi:10.1145/3365610.336564
[BibTeX] [Abstract] [PDF]
Recent work demonstrated the opportunities of thermal imaging in the development of novel interactive systems. However, the exploration is limited to controlled lab setups. Hence, we know little about how thermal imaging could be useful for a broader range of daily applications by novice users. To investigate the potential of domestication of thermal imaging, we conducted an exploration with a technology-cultural probe. Ten households (26 individuals) used a mobile thermal camera in their daily life. We collected thermal photos taken by the participants and conducted interviews after using the camera. We found that the users were excited about using thermal cameras in their everyday lives and found many practical uses for them. Our study provides insights into how novice users wish to use thermal imaging technology to augment their vision in daily setups, as well as identifying and classifying common thermal imaging use cases. Our work contributes implications for designing thermal imaging devices targeted towards novice users.
@InProceedings{abdelrahman2019mum,
author = {Yomna Abdelrahman and Paweł Woźniak and Pascal Knierim and Dominik Weber and Ken Pfeuffer and Niels Henze and Albrecht Schmidt and Florian Alt},
booktitle = {{Proceedings of the 18th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{Exploring the Domestication of Thermal Imaging}},
year = {2019},
address = {New York, NY, USA},
note = {abdelrahman2019mum},
publisher = {Association for Computing Machinery},
series = {MUM'19},
abstract = {Recent work demonstrated the opportunities of thermal imaging in the development of novel interactive systems. However, the exploration is limited to controlled lab setups. Hence, we know little about how thermal imaging could be useful for a broader range of daily applications by novice users. To investigate the potential of domestication of thermal imaging, we conducted an exploration with a technology-cultural probe. Ten households (26 individuals) used a mobile thermal camera in their daily life. We collected thermal photos taken by the participants and conducted interviews after using the camera. We found that the users were excited about using thermal cameras in their everyday lives and found many practical uses for them. Our study provides insights into how novice users wish to use thermal imaging technology to augment their vision in daily setups, as well as identifying and classifying common thermal imaging use cases. Our work contributes implications for designing thermal imaging devices targeted towards novice users.},
doi = {10.1145/3365610.336564},
location = {Pisa, Italy},
timestamp = {2019.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/abdelrahman2019mum.pdf},
}
S. Prange, L. Mecke, M. Stadler, M. Balluff, M. Khamis, and F. Alt. Securing Personal Items in Public Space – Stories of Attacks and Threats. In Proceedings of the 18th International Conference on Mobile and Ubiquitous Multimedia (MUM’19), Association for Computing Machinery, New York, NY, USA, 2019. doi:10.1145/3365610.3365628
[BibTeX] [Abstract] [PDF]
While we put great effort in protecting digital devices and data, there is a lack of research on usable techniques to secure personal items that we carry in public space. To better understand situations where ubiquitous technologies could help secure personal items, we conducted an online survey (N=101) in which we collected real-world stories from users reporting on personal items, either at risk of, or actually being, lost, damaged or stolen. We found that the majority of cases occurred in (semi-)public spaces during afternoon and evening times, when users left their items. From these results, we derived a model of incidents involving personal items in public space as well as a set of properties to describe situations where personal items may be at risk. We discuss recurring properties of the scenarios, potential multimedia-based protection mechanisms for securing personal items in public space, as well as future research suggestions.
@InProceedings{prange2019mum,
author = {Sarah Prange AND Lukas Mecke AND Michael Stadler AND Maximilian Balluff AND Mohamed Khamis AND Florian Alt},
booktitle = {{Proceedings of the 18th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{Securing Personal Items in Public Space - Stories of Attacks and Threats}},
year = {2019},
address = {New York, NY, USA},
note = {prange2019mum},
publisher = {Association for Computing Machinery},
series = {MUM'19},
abstract = {While we put great effort in protecting digital devices and data, there is a lack of research on usable techniques to secure personal items that we carry in public space. To better understand situations where ubiquitous technologies could help secure personal items, we conducted an online survey (N=101) in which we collected real-world stories from users reporting on personal items, either at risk of, or actually being, lost, damaged or stolen. We found that the majority of cases occurred in (semi-)public spaces during afternoon and evening times, when users left their items. From these results, we derived a model of incidents involving personal items in public space as well as a set of properties to describe situations where personal items may be at risk. We discuss recurring properties of the scenarios, potential multimedia-based protection mechanisms for securing personal items in public space, as well as future research suggestions.},
doi = {10.1145/3365610.3365628},
location = {Pisa, Italy},
timestamp = {2019.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/prange2019mum.pdf},
}
K. Holländer, A. Colley, C. Mai, J. Häkkilä, F. Alt, and B. Pfleging. Investigating the Influence of External Car Displays on Pedestrians’ Crossing Behavior in Virtual Reality. In Proceedings of the 21st International Conference on Human-Computer Interaction with Mobile Devices and Services (MobileHCI ’19), Association for Computing Machinery, New York, NY, USA, 2019. doi:10.1145/3338286.3340138
[BibTeX] [Abstract] [PDF]
Focusing on pedestrian safety in the era of automated vehicles, we investigate the interaction between pedestrians and automated cars. In particular, we investigate the influence of external car displays (ECDs) on pedestrians’ crossing behavior, and the time needed to make a crossing decision. We present a study in a high-immersion VR environment comparing three alternative car-situated visualizations: a smiling grille, a traffic light style indicator, and a gesturing robotic driver. Crossing at non-designated crossing points on a straight road and at a junction, where vehicles turn towards the pedestrian, are explored. We report that ECDs significantly reduce pedestrians’ decision time, and argue that ECDs support comfort, trust and acceptance in automated vehicles. We believe ECDs might become a valuable addition for future vehicles.
@InProceedings{hollaender2019mobilehci,
author = {Holl\"{a}nder, Kai and Colley, Ashley and Mai, Christian and H\"{a}kkil\"{a}, Jonna and Alt, Florian and Pfleging, Bastian},
booktitle = {{Proceedings of the 21st International Conference on Human-Computer Interaction with Mobile Devices and Services}},
title = {{Investigating the Influence of External Car Displays on Pedestrians' Crossing Behavior in Virtual Reality}},
year = {2019},
address = {New York, NY, USA},
note = {hollaender2019mobilehci},
publisher = {Association for Computing Machinery},
series = {MobileHCI '19},
abstract = {Focusing on pedestrian safety in the era of automated vehicles, we investigate the interaction between pedestrians and automated cars. In particular, we investigate the influence of external car displays (ECDs) on pedestrians' crossing behavior, and the time needed to make a crossing decision. We present a study in a high-immersion VR environment comparing three alternative car-situated visualizations: a smiling grille, a traffic light style indicator, and a gesturing robotic driver. Crossing at non-designated crossing points on a straight road and at a junction, where vehicles turn towards the pedestrian, are explored. We report that ECDs significantly reduce pedestrians' decision time, and argue that ECDs support comfort, trust and acceptance in automated vehicles. We believe ECDs might become a valuable addition for future vehicles.},
articleno = {27},
doi = {10.1145/3338286.3340138},
isbn = {9781450368254},
keywords = {Traffic safety, Virtual reality, External car displays, Pedestrian-autonomous vehicle interaction, Autonomous vehicles},
location = {Taipei, Taiwan},
numpages = {11},
timestamp = {2019.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/hollaender2019mobilehci.pdf},
}
F. Alt, A. Bulling, and T. Döring (eds.), Proceedings of Mensch und Computer 2019. New York, NY, USA: Association for Computing Machinery, 2019. doi:10.1145/3340764
[BibTeX] [PDF]
@Proceedings{alt2019muc,
title = {{Proceedings of Mensch und Computer 2019}},
year = {2019},
address = {New York, NY, USA},
editor = {Alt, Florian and Bulling, Andreas and D\"{o}ring, Tanja},
isbn = {978-1-4503-7198-8},
note = {alt2019muc},
publisher = {Association for Computing Machinery},
series = {MuC'19},
doi = {10.1145/3340764},
location = {Hamburg, Germany},
timestamp = {2019.09.06},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2019muc.pdf},
}
M. Braun, R. Chadowitz, and F. Alt. User Experience of Driver State Visualizations: a Look at Demographics and Personalities. In Proceedings of the 17th IFIP TC 13 International Conference on Human-Computer Interaction (INTERACT ’19), Springer, Berlin-Heidelberg, Germany, 2019. doi:10.1007/978-3-030-29390-1_9
[BibTeX] [Abstract] [PDF]
Driver state detection is an emerging topic for automotive user interfaces. Motivated by the trend of self-tracking, one crucial question within this field is how or whether detected states should be displayed. In this work we investigate the impact of demographics and personality traits on the user experience of driver state visualizations. 328 participants experienced three concepts visualizing their current state in a publicly installed driving simulator. Driver age, experience, and personality traits were shown to have impact on visualization preferences. While a continuous display was generally preferred, older respondents and drivers with little experience favored a system with less visual elements. Extroverted participants were more open towards interventions. Our findings lead us to believe that, while users are generally open to driver state detection, its visualization should be adapted to age, driving experience, and personality. This work is meant to support professionals and researchers designing affective in-car information systems.
@InProceedings{braun2019interact,
author = {Braun, Michael and Chadowitz, Ronee and Alt, Florian},
booktitle = {{Proceedings of the 17th IFIP TC 13 International Conference on Human-Computer Interaction}},
title = {{User Experience of Driver State Visualizations: a Look at Demographics and Personalities}},
year = {2019},
address = {Berlin-Heidelberg, Germany},
month = {4},
note = {braun2019interact},
publisher = {Springer},
series = {INTERACT '19},
abstract = {Driver state detection is an emerging topic for automotive user interfaces. Motivated by the trend of self-tracking, one crucial question within this field is how or whether detected states should be displayed. In this work we investigate the impact of demographics and personality traits on the user experience of driver state visualizations. 328 participants experienced three concepts visualizing their current state in a publicly installed driving simulator. Driver age, experience, and personality traits were shown to have impact on visualization preferences. While a continuous display was generally preferred, older respondents and drivers with little experience favored a system with less visual elements. Extroverted participants were more open towards interventions. Our findings lead us to believe that, while users are generally open to driver state detection, its visualization should be adapted to age, driving experience, and personality. This work is meant to support professionals and researchers designing affective in-car information systems.},
day = {1},
doi = {10.1007/978-3-030-29390-1_9},
keywords = {Affective Computing, Emotion Detection, Demographics, Personality, Driver State Visualization, Automotive User Interfaces},
language = {English},
location = {Paphos, Cyprus},
owner = {florian},
timestamp = {2019.09.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2019interact.pdf},
}
M. Hassib, M. Braun, B. Pfleging, and F. Alt. Detecting and influencing driver emotions using psycho-physiological sensors and ambient light. In Proceedings of the 17th IFIP TC 13 International Conference on Human-Computer Interaction (INTERACT ’19), Springer, Berlin-Heidelberg, Germany, 2019. doi:10.1007/978-3-030-29381-9_43
[BibTeX] [Abstract] [PDF]
Driving is a sensitive task that is strongly affected by the driver’s emotions. Negative emotions, such as anger, can evidently lead to more driving errors. In this work, we introduce a concept of detecting and influencing driver emotions using psycho-physiological sensing for emotion classification and ambient light for feedback. We detect arousal and valence of emotional responses from wearable bio-electric sensors, namely brain-computer interfaces and heart rate sensors. We evaluated our concept in a static driving simulator with a fully equipped car with 12 participants. Before the rides, we elicit negative emotions and evaluate driving performance and physiological data while driving under stressful conditions. We use three ambient lighting conditions (no light, blue, orange). Using a subject-dependent random forests classifier with 40 features collected from physiological data we achieve an average accuracy of 78.9% for classifying valence and 68.7% for arousal. Driving performance was enhanced in conditions where ambient lighting was introduced. Both blue and orange light helped drivers to improve lane keeping. We discuss insights from our study and provide design recommendations for designing emotion sensing and feedback systems in the car.
@InProceedings{hassib2019interact,
author = {Mariam Hassib and Michael Braun and Bastian Pfleging and Florian Alt},
booktitle = {{Proceedings of the 17th IFIP TC 13 International Conference on Human-Computer Interaction}},
title = {{Detecting and influencing driver emotions using psycho-physiological sensors and ambient light}},
year = {2019},
address = {Berlin-Heidelberg, Germany},
month = {4},
note = {hassib2019interact},
publisher = {Springer},
series = {INTERACT '19},
abstract = {Driving is a sensitive task that is strongly affected by the driver's emotions. Negative emotions, such as anger, can evidently lead to more driving errors. In this work, we introduce a concept of detecting and influencing driver emotions using psycho-physiological sensing for emotion classification and ambient light for feedback. We detect arousal and valence of emotional responses from wearable bio-electric sensors, namely brain-computer interfaces and heart rate sensors. We evaluated our concept in a static driving simulator with a fully equipped car with 12 participants. Before the rides, we elicit negative emotions and evaluate driving performance and physiological data while driving under stressful conditions. We use three ambient lighting conditions (no light, blue, orange). Using a subject-dependent random forests classifier with 40 features collected from physiological data we achieve an average accuracy of 78.9\% for classifying valence and 68.7\% for arousal. Driving performance was enhanced in conditions where ambient lighting was introduced. Both blue and orange light helped drivers to improve lane keeping. We discuss insights from our study and provide design recommendations for designing emotion sensing and feedback systems in the car.},
day = {1},
doi = {10.1007/978-3-030-29381-9_43},
keywords = {Affective Computing, Automotive UI, EEG, Ambient Light},
language = {English},
location = {Paphos, Cyprus},
owner = {florian},
timestamp = {2019.09.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2019interact2.pdf},
}
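As a rough illustration of the classification step reported above (subject-dependent random forests over 40 physiological features), a corresponding scikit-learn pipeline could look as follows. This is a sketch under assumed inputs: the feature files, their contents, and the label encoding are hypothetical stand-ins, not the study's actual pipeline.

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

# Hypothetical per-subject data: one row of 40 physiological features
# (e.g. EEG band power, heart-rate statistics) per labelled time window.
X = np.load("subject01_features.npy")  # shape (n_windows, 40)
y = np.load("subject01_valence.npy")   # e.g. 0 = negative, 1 = positive

clf = RandomForestClassifier(n_estimators=100, random_state=0)
scores = cross_val_score(clf, X, y, cv=5)  # subject-dependent evaluation
print(f"mean valence accuracy: {scores.mean():.3f}")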
L. Mecke, S. D. Rodriguez, D. Buschek, S. Prange, and F. Alt. Communicating Device Confidence Level and Upcoming Re-Authentications in Continuous Authentication Systems on Mobile Devices. In Proceedings of the Fifteenth Symposium on Usable Privacy and Security (SOUPS’19), USENIX, Santa Clara, CA, 2019, p. 289–301. doi:10.5555/3361476.3361498
[BibTeX] [PDF] [Talk] [Slides]
@InProceedings{mecke2019soups1,
author = {Lukas Mecke and Sarah Delgado Rodriguez and Daniel Buschek and Sarah Prange and Florian Alt},
booktitle = {{Proceedings of the Fifteenth Symposium on Usable Privacy and Security}},
title = {{Communicating Device Confidence Level and Upcoming Re-Authentications in Continuous Authentication Systems on Mobile Devices}},
year = {2019},
address = {Santa Clara, CA},
month = aug,
note = {mecke2019soups1},
pages = {289--301},
publisher = {USENIX},
series = {SOUPS'19},
doi = {10.5555/3361476.3361498},
isbn = {978-1-939133-05-2},
slides = {https://www.usenix.org/sites/default/files/conference/protected-files/soups19_slides_mecke.pdf},
talk = {https://youtu.be/eFd7NSt45Oo},
timestamp = {2019.08.13},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mecke2019soups1.pdf},
}
L. Mecke, D. Buschek, M. Kiermeier, S. Prange, and F. Alt. Exploring Intentional Behaviour Modifications for Password Typing on Mobile Touchscreen Devices. In Fifteenth Symposium on Usable Privacy and Security (SOUPS’19), USENIX, Santa Clara, CA, 2019, p. 303–317. doi:10.5555/3361476.3361499
[BibTeX] [PDF] [Talk] [Slides]
@InProceedings{mecke2019soups2,
author = {Lukas Mecke and Daniel Buschek and Mathias Kiermeier and Sarah Prange and Florian Alt},
booktitle = {{Fifteenth Symposium on Usable Privacy and Security}},
title = {{Exploring Intentional Behaviour Modifications for Password Typing on Mobile Touchscreen Devices}},
year = {2019},
address = {Santa Clara, CA},
month = aug,
note = {mecke2019soups2},
pages = {303--317},
publisher = {USENIX},
series = {SOUPS'19},
doi = {10.5555/3361476.3361499},
isbn = {978-1-939133-05-2},
slides = {https://www.usenix.org/sites/default/files/conference/protected-files/soups2019_slides_mecke_behaviour.pdf},
talk = {https://youtu.be/EzTXFUnGDI0},
timestamp = {2019.08.13},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mecke2019soups2.pdf},
}
S. Prange, Y. Abdrabou, L. Mecke, and F. Alt. Hidden in Plain Sight: Using Lockscreen Content for Authentication on Mobile Devices. In Proceedings of the Fifteenth Symposium on Usable Privacy and Security (SOUPS’19), USENIX, Santa Clara, CA, 2019.
[BibTeX] [PDF]
@InProceedings{prange2019soupsadj,
author = {Sarah Prange AND Yasmeen Abdrabou AND Lukas Mecke AND Florian Alt},
booktitle = {{Proceedings of the Fifteenth Symposium on Usable Privacy and Security}},
title = {{Hidden in Plain Sight: Using Lockscreen Content for Authentication on Mobile Devices}},
year = {2019},
address = {Santa Clara, CA},
note = {prange2019soupsadj},
publisher = {USENIX},
series = {SOUPS'19},
timestamp = {2019.08.12},
url = {http://www.florian-alt.org/unibw/wp-content/publications/prange2019soupsadj.pdf},
}
S. R. R. Rivu, Y. Abdrabou, T. Mayer, K. Pfeuffer, and F. Alt. GazeButton: Enhancing Buttons with Eye Gaze Interactions. In Proceedings of the 2019 ACM Symposium on Eye Tracking Research & Applications (COGAIN ’19), Association for Computing Machinery, New York, NY, USA, 2019. doi:10.1145/3317956.3318154
[BibTeX] [Abstract] [PDF]
The button is an element of a user interface to trigger an action, traditionally using click or touch. We introduce GazeButton, a novel concept extending the default button mode with advanced gaze-based interactions. During normal interaction, users can utilise this button as a universal hub for gaze-based UI shortcuts. The advantages are: 1) easy to integrate in existing UIs, 2) complementary, as users choose either gaze or manual interaction, 3) straightforward, as all features are located in one button, and 4) one button to interact with the whole screen. We explore GazeButtons for a custom-made text reading, writing, and editing tool on a multitouch tablet device. For example, this allows the text cursor position to be set as users look at the position and tap on the GazeButton, avoiding costly physical movement. Or, users can simply gaze over a part of the text that should be selected, while holding the GazeButton. We present a design space, specific application examples, and point to future button designs that become highly expressive by unifying the user’s visual and manual input.
@InProceedings{rivu2019cogain,
author = {Sheikh Radiah Rahim Rivu AND Yasmeen Abdrabou AND Thomas Mayer AND Ken Pfeuffer AND Florian Alt},
booktitle = {{Proceedings of the 2019 ACM Symposium on Eye Tracking Research \& Applications}},
title = {{GazeButton: Enhancing Buttons with Eye Gaze Interactions}},
year = {2019},
address = {New York, NY, USA},
note = {rivu2019cogain},
publisher = {Association for Computing Machinery},
series = {COGAIN '19},
abstract = {The button is an element of a user interface to trigger an action, traditionally using click or touch. We introduce GazeButton, a novel concept extending the default button mode with advanced gaze-based interactions. During normal interaction, users can utilise this button as a universal hub for gaze-based UI shortcuts. The advantages are: 1) easy to integrate in existing UIs, 2) complementary, as users choose either gaze or manual interaction, 3) straightforward, as all features are located in one button, and 4) one button to interact with the whole screen. We explore GazeButtons for a custom-made text reading, writing, and editing tool on a multitouch tablet device. For example, this allows the text cursor position to be set as users look at the position and tap on the GazeButton, avoiding costly physical movement. Or, users can simply gaze over a part of the text that should be selected, while holding the GazeButton. We present a design space, specific application examples, and point to future button designs that become highly expressive by unifying the user's visual and manual input.},
articleno = {73},
doi = {10.1145/3317956.3318154},
isbn = {9781450367097},
keywords = {touch and gaze, text input, interaction modality},
location = {Denver, Colorado},
numpages = {7},
timestamp = {2019.06.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/rivu2019cogain.pdf},
}
C. George, P. Janssen, D. Heuss, and F. Alt. Should I Interrupt or Not? Understanding Interruptions in Head-Mounted Display Settings. In Proceedings of the 2019 on Designing Interactive Systems Conference (DIS ’19), Association for Computing Machinery, New York, NY, USA, 2019, p. 497–510. doi:10.1145/3322276.3322363
[BibTeX] [Abstract] [PDF]
Head-mounted displays (HMDs) are being used for VR and AR applications and increasingly permeate our everyday life. At the same time, a detailed understanding of interruptions in settings where people wearing an HMD (HMD user) and people not wearing an HMD (bystander) is missing. We investigate (a) whether bystanders are capable of identifying when HMD users switch tasks by observing their gestures, and hence exploit opportune moments for interruptions, and (b) which strategies bystanders employ. In a lab study (N=64) we found that bystanders are able to successfully identify both task switches (83%) and tasks (77%) within only a few seconds of the task switch. Furthermore, we identified interruption strategies of bystanders. From our results we derive implications meant to support designers and practitioners in building HMD applications that are used in a co-located collaborative setting.
@InProceedings{george2019dis,
author = {George, Ceenu and Janssen, Philipp and Heuss, David and Alt, Florian},
booktitle = {{Proceedings of the 2019 on Designing Interactive Systems Conference}},
title = {{Should I Interrupt or Not? Understanding Interruptions in Head-Mounted Display Settings}},
year = {2019},
address = {New York, NY, USA},
note = {george2019dis},
pages = {497--510},
publisher = {Association for Computing Machinery},
series = {DIS '19},
abstract = {Head-mounted displays (HMDs) are being used for VR and AR applications and increasingly permeate our everyday life. At the same time, a detailed understanding of interruptions in settings where people wearing an HMD (HMD user) and people not wearing an HMD (bystander) is missing. We investigate (a) whether bystanders are capable of identifying when HMD users switch tasks by observing their gestures, and hence exploit opportune moments for interruptions, and (b) which strategies bystanders employ. In a lab study (N=64) we found that bystanders are able to successfully identify both task switches (83%) and tasks (77%) within only a few seconds of the task switch. Furthermore, we identified interruption strategies of bystanders. From our results we derive implications meant to support designers and practitioners in building HMD applications that are used in a co-located collaborative setting.},
doi = {10.1145/3322276.3322363},
isbn = {9781450358507},
keywords = {virtual and augmented reality, gesture, hmd, interruption},
location = {San Diego, CA, USA},
numpages = {14},
owner = {florian},
timestamp = {2019.06.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/george2019dis.pdf},
}
H. Drewes, K. Pfeuffer, and F. Alt. Time- and Space-efficient Eye Tracker Calibration. In Proceedings of the 2019 ACM Symposium on Eye Tracking Research & Applications (ETRA ’19), Association for Computing Machinery, New York, NY, USA, 2019. doi:10.1145/3314111.3319818
[BibTeX] [Abstract] [PDF]
One of the obstacles to bring eye tracking technology to everyday human computer interactions is the time consuming calibration procedure. In this paper we investigate a novel calibration method based on smooth pursuit eye movement. The method uses linear regression to calculate the calibration mapping. The advantage is that users can perform the calibration quickly in a few seconds and only use a small calibration area to cover a large tracking area. We first describe the theoretical background on establishing a calibration mapping and discuss differences of calibration methods used. We then present a user study comparing the new regression based method with a classical nine-point and with other pursuit based calibrations. The results show the proposed method is fully functional, quick, and enables accurate tracking of a large area. The method has the potential to be integrated into current eye tracking systems to make them more usable in various use cases.
@InProceedings{drewes2019etra,
author = {Drewes, Heiko and Pfeuffer, Ken and Alt, Florian},
booktitle = {{Proceedings of the 2019 ACM Symposium on Eye Tracking Research \& Applications}},
title = {{Time- and Space-efficient Eye Tracker Calibration}},
year = {2019},
address = {New York, NY, USA},
note = {drewes2019etra},
publisher = {Association for Computing Machinery},
series = {ETRA '19},
abstract = {One of the obstacles to bring eye tracking technology to everyday human computer interactions is the time consuming calibration procedure. In this paper we investigate a novel calibration method based on smooth pursuit eye movement. The method uses linear regression to calculate the calibration mapping. The advantage is that users can perform the calibration quickly in a few seconds and only use a small calibration area to cover a large tracking area. We first describe the theoretical background on establishing a calibration mapping and discuss differences of calibration methods used. We then present a user study comparing the new regression based method with a classical nine-point and with other pursuit based calibrations. The results show the proposed method is fully functional, quick, and enables accurate tracking of a large area. The method has the potential to be integrated into current eye tracking systems to make them more usable in various use cases.},
acmid = {3319818},
doi = {10.1145/3314111.3319818},
isbn = {978-1-4503-6709-7},
keywords = {eye-tracking, calibration, eye-tracker, smooth pursuit, eye movement},
location = {Denver, CO, USA},
numpages = {8},
owner = {florian},
timestamp = {2019.06.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/drewes2019etra.pdf},
}
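The regression-based calibration described above amounts to fitting a linear map from raw tracker coordinates to screen coordinates, using samples gathered while the user pursues a moving stimulus. A minimal least-squares sketch follows; the shapes and names are assumptions, and the paper's exact formulation may differ:

import numpy as np

def fit_calibration(raw_gaze, stimulus_pos):
    # raw_gaze: (n, 2) uncalibrated gaze samples; stimulus_pos: (n, 2)
    # known positions of the pursuit stimulus at the same timestamps.
    X = np.hstack([raw_gaze, np.ones((len(raw_gaze), 1))])
    A, *_ = np.linalg.lstsq(X, stimulus_pos, rcond=None)
    return A  # (3, 2) affine map: [x, y, 1] @ A ~ screen position

def apply_calibration(A, x, y):
    # Map a raw gaze sample to calibrated screen coordinates.
    return np.array([x, y, 1.0]) @ A

Because the fit only needs paired gaze and stimulus samples, a few seconds of pursuit over a small on-screen area suffice, which is the time and space advantage the paper reports.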
M. Kattenbeck, M. A. Kilian, M. Ferstl, B. Ludwig, and F. Alt. Towards task-sensitive assistance in public spaces. Aslib Journal of Information Management, 2019. doi:10.1108/AJIM-07-2018-0179
[BibTeX] [Abstract] [PDF]
Purpose Performing tasks in public spaces can be demanding due to task complexity. Systems that can keep track of the current task state may help their users to successfully fulfill a task. These systems, however, require major implementation effort. The purpose of this paper is to investigate if and how a mobile information assistant which has only basic task-tracking capabilities can support users by employing a least effort approach. This means, we are interested in whether such a system is able to have an impact on the way a workflow in public space is perceived. Design/methodology/approach The authors implement and test AIRBOT, a mobile chatbot application that can assist air passengers in successfully boarding a plane. The authors apply a three-tier approach and, first, conduct expert and passenger interviews to understand the workflow and the information needs occurring therein; second, the authors implement a mobile chatbot application providing minimum task-tracking capabilities to support travelers by providing boarding-relevant information in a proactive manner. Finally, the authors evaluate this application by means of an in situ study (n = 101 passengers) at a major European airport. Findings The authors provide evidence that basic task-tracking capabilities are sufficient to affect the users’ task perception. AIRBOT is able to decrease the perceived workload airport services impose on users. It has a negative impact on satisfaction with non-personalized information offered by the airport, though. Originality/value The study shows that the number of features is not the most important means to successfully provide assistance in public space workflows. The study can, moreover, serve as a blueprint to design task-based assistants for other contexts.
@Article{kattenbeck2019ajim,
author = {Markus Kattenbeck and Melanie A Kilian and Matthias Ferstl and Bernd Ludwig and Florian Alt},
journal = {{Aslib Journal of Information Management}},
title = {{Towards task-sensitive assistance in public spaces}},
year = {2019},
note = {kattenbeck2019ajim},
abstract = {Purpose
Performing tasks in public spaces can be demanding due to task complexity. Systems that can keep track of the current task state may help their users to successfully fulfill a task. These systems, however, require major implementation effort. The purpose of this paper is to investigate if and how a mobile information assistant which has only basic task-tracking capabilities can support users by employing a least effort approach. This means, we are interested in whether such a system is able to have an impact on the way a workflow in public space is perceived.
Design/methodology/approach
The authors implement and test AIRBOT, a mobile chatbot application that can assist air passengers in successfully boarding a plane. The authors apply a three-tier approach and, first, conduct expert and passenger interviews to understand the workflow and the information needs occurring therein; second, the authors implement a mobile chatbot application providing minimum task-tracking capabilities to support travelers by providing boarding-relevant information in a proactive manner. Finally, the authors evaluate this application by means of an in situ study (n = 101 passengers) at a major European airport.
Findings
The authors provide evidence that basic task-tracking capabilities are sufficient to affect the users' task perception. AIRBOT is able to decrease the perceived workload airport services impose on users. It has a negative impact on satisfaction with non-personalized information offered by the airport, though.
Originality/value
The study shows that the number of features is not the most important means to successfully provide assistance in public space workflows. The study can, moreover, serve as a blueprint to design task-based assistants for other contexts.},
doi = {10.1108/AJIM-07-2018-0179},
keywords = {Human-computer interaction, Assistance system, Cooperative problem solving, In situ study, Mobile information behaviour, Mobile information needs},
publisher = {Emerald Publishing Limited},
timestamp = {2019.06.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/kattenbeck2019ajim.},
}
K. Pfeuffer, M. Geiger, S. Prange, L. Mecke, D. Buschek, and F. Alt. Behavioural Biometrics in VR – Identifying People from Body Motion and Relations in Virtual Reality. In Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems (CHI ’19), Association for Computing Machinery, New York, NY, USA, 2019. doi:10.1145/3290605.3300340
[BibTeX] [PDF]
@InProceedings{pfeuffer2019chi,
author = {Ken Pfeuffer AND Matthias Geiger AND Sarah Prange AND Lukas Mecke AND Daniel Buschek AND Florian Alt},
booktitle = {{Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems}},
title = {{Behavioural Biometrics in VR - Identifying People from Body Motion and Relations in Virtual Reality}},
year = {2019},
address = {New York, NY, USA},
note = {pfeuffer2019chi},
publisher = {Association for Computing Machinery},
series = {CHI '19},
doi = {10.1145/3290605.3300340},
keywords = {Virtual Reality, Behavioural Biometrics, Motion, Relation, Proprioception, Adaptive UIs},
location = {Glasgow, UK},
numpages = {11},
timestamp = {2019.05.03},
url = {http://florian-alt.org/unibw/wp-content/publications/pfeuffer2019chi.pdf},
}
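To give a flavour of how behavioural biometrics of the kind named in the title typically work, the sketch below enrols per-user templates from session-level motion features and identifies a new session by nearest template. The features and matcher are illustrative only, not the classifier used in the paper:

import numpy as np

def enroll(sessions):
    # sessions: dict of user id -> list of 1-D feature vectors, each
    # summarising one VR session's body motion (e.g. statistics of head
    # and controller trajectories; the feature choice is hypothetical).
    return {uid: np.mean(vecs, axis=0) for uid, vecs in sessions.items()}

def identify(templates, probe):
    # Return the enrolled user whose template is closest to the probe.
    return min(templates, key=lambda u: np.linalg.norm(templates[u] - probe))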
M. Braun and F. Alt. Affective Assistants: a Matter of States and Traits. In Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems (CHI EA’19), Association for Computing Machinery, New York, NY, USA, 2019.
[BibTeX] [Abstract] [PDF]
This work presents a model for the development of affective assistants based on the pillars of user states and traits. Traits are defined as long-term qualities like personality, personal experiences, preferences, and demographics, while the user state comprises cognitive load, emotional states, and physiological parameters. We discuss useful input values and the necessary developments for an advancement of affective assistants with the example of an affective in-car voice assistant.
@InProceedings{braun2019chiea,
author = {Michael Braun AND Florian Alt},
booktitle = {{Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems}},
title = {{Affective Assistants: a Matter of States and Traits}},
year = {2019},
address = {New York, NY, USA},
note = {braun2019chiea},
publisher = {Association for Computing Machinery},
series = {CHI EA'19},
abstract = {This work presents a model for the development of affective assistants based on the pillars of user states and traits. Traits are defined as long-term qualities like personality, personal experiences, preferences, and demographics, while the user state comprises cognitive load, emotional states, and physiological parameters. We discuss useful input values and the necessary developments for an advancement of affective assistants with the example of an affective in-car voice assistant.},
comment = {braun2019chiea},
location = {Glasgow, UK},
timestamp = {2019.05.03},
url = {http://florian-alt.org/unibw/wp-content/publications/braun2019chiea.pdf},
}
S. Prange, D. Buschek, K. Pfeuffer, L. Mecke, P. Ehrich, J. Le, and F. Alt. Go for GOLD: Investigating User Behaviour in Goal-Oriented Tasks. In Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems (CHI EA’19), Association for Computing Machinery, New York, NY, USA, 2019.
[BibTeX] [Abstract] [PDF]
Building adaptive support systems requires a deep understanding of why users get stuck or face problems during a goal-oriented task and how they perceive such situations. To investigate this, we first chart a problem space, comprising different problem characteristics (complexity, time, available means, and consequences). Secondly, we map them to LEGO assembly tasks. We apply these in a lab study equipped with several tracking technologies (i.e., smartwatch sensors and an OptiTrack setup) to assess which problem characteristics lead to measurable consequences in user behaviour. Participants rated the problems that occurred after each task. With this work, we suggest first steps towards a) understanding user behaviour in problem situations and b) building upon this knowledge to inform the design of adaptive support systems. As a result, we provide the GOLD dataset (Goal-Oriented Lego Dataset) for further analysis.
@InProceedings{prange2019chiea,
author = {Sarah Prange AND Daniel Buschek AND Ken Pfeuffer AND Lukas Mecke AND Peter Ehrich AND Jens Le AND Florian Alt},
booktitle = {{Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems}},
title = {{Go for GOLD: Investigating User Behaviour in Goal-Oriented Tasks}},
year = {2019},
address = {New York, NY, USA},
note = {prange2019chiea},
publisher = {Association for Computing Machinery},
series = {CHI EA'19},
abstract = {Building adaptive support systems requires a deep understanding of why users get stuck or face problems during a goal-oriented task and how they perceive such situations. To investigate this, we first chart a problem space, comprising different problem characteristics (complexity, time, available means, and consequences). Secondly, we map them to LEGO assembly tasks. We apply these in a lab study equipped with several tracking technologies (i.e., smartwatch sensors and an OptiTrack setup) to assess which problem characteristics lead to measurable consequences in user behaviour. Participants rated the problems that occurred after each task. With this work, we suggest first steps towards a) understanding user behaviour in problem situations and b) building upon this knowledge to inform the design of adaptive support systems. As a result, we provide the GOLD dataset (Goal-Oriented Lego Dataset) for further analysis.},
comment = {prange2019chiea},
location = {Glasgow, UK},
timestamp = {2019.05.01},
url = {http://florian-alt.org/unibw/wp-content/publications/prange2019chiea.pdf},
}
S. Prange, C. Tiefenau, E. von Zezschwitz, and F. Alt. Towards Understanding User Interaction in Future Smart Homes. In Proceedings of CHI ’19 Workshop on New Directions for the IoT: Automate, Share, Build, and Care (CHI ’19 Workshop), Association for Computing Machinery, New York, NY, USA, 2019.
[BibTeX] [Abstract] [PDF]
IoT devices are currently finding their way into people's homes, providing rich functionality by means of various interaction modalities. We see great potential in collecting and analysing data about users' interaction with their smart home devices to gain insights about their daily life behaviour for self-reflection as well as security purposes. We present a methodology to study interaction with IoT devices in users' (smart) homes. Logging daily behaviour usually comes with high effort and often interrupts natural interaction. Hence, we suggest an unobtrusive logging approach by means of a smartwatch and NFC technology. Participants scan interaction with devices using self-placed NFC tags. We tested our method with two flat shares in two cities and provide preliminary insights with regards to the strengths and weaknesses of our study approach.
@InProceedings{prange2019iot,
author = {Sarah Prange AND Christian Tiefenau AND Emanuel von Zezschwitz AND Florian Alt},
booktitle = {{Proceedings of CHI '19 Workshop on New Directions for the IoT: Automate, Share, Build, and Care}},
title = {{Towards Understanding User Interaction in Future Smart Homes}},
year = {2019},
address = {New York, NY, USA},
note = {prange2019iot},
publisher = {Association for Computing Machinery},
series = {CHI '19 Workshop},
abstract = {IoT devices are currently finding their way into people's homes, providing rich functionality by means of various interaction modalities. We see great potential in collecting and analysing data about users' interaction with their smart home devices to gain insights about their daily life behaviour for self-reflection as well as security purposes. We present a methodology to study interaction with IoT devices in users' (smart) homes. Logging daily behaviour usually comes with high effort and often interrupts natural interaction. Hence, we suggest an unobtrusive logging approach by means of a smartwatch and NFC technology. Participants scan interaction with devices using self-placed NFC tags. We tested our method with two flat shares in two cities and provide preliminary insights with regards to the strengths and weaknesses of our study approach.},
keywords = {IoT, Internet of Things, Smart Home, Smart Devices, NFC, Android, Field Study, Data Collection, In-the-wild},
location = {Glasgow, UK},
numpages = {5},
owner = {florian},
timestamp = {2019.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/prange2019iot.pdf},
}
S. Faltaous, G. Haas, L. Barrios, A. Seiderer, S. F. Rauh, H. J. Chae, S. Schneegass, and F. Alt. BrainShare: A Glimpse of Social Interaction for Locked-in Syndrome Patients. In Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems (CHI EA ’19), Association for Computing Machinery, New York, NY, USA, 2019, p. 1–6. doi:10.1145/3290607.3312754
[BibTeX] [Abstract] [PDF]
Locked-in syndrome (LIS) patients are partially or entirely paralyzed but fully conscious. Those patients report a high quality of life and desire to remain active in their society and families. We propose a system for enhancing social interactions of LIS patients with their families and friends with the goal of improving their overall quality of life. Our system comprises a Brain-Computer Interface (BCI), augmented-reality glasses, and a screen that shares the view of a caretaker with the patient. This setting targets both patients and caretakers: (1) it allows the patient to experience the outside world through the eyes of the caretaker and (2) it creates a way of active communication between patient and caretaker to convey needs and advice. To validate our approach, we showcased our prototype and conducted interviews that demonstrate the potential benefit for affected patients.
@InProceedings{faltaous2019chiea,
author = {Sarah Faltaous AND Gabriel Haas AND Liliana Barrios AND Andreas Seiderer AND Sebastian Felix Rauh AND Han Joo Chae AND Stefan Schneegass AND Florian Alt},
booktitle = {{Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems}},
title = {{BrainShare: A Glimpse of Social Interaction for Locked-in Syndrome Patients}},
year = {2019},
address = {New York, NY, USA},
note = {faltaous2019chiea},
pages = {1--6},
publisher = {Association for Computing Machinery},
series = {CHI EA '19},
abstract = {Locked-in syndrome (LIS) patients are partially or entirely paralyzed but fully conscious. Those patients report a high quality of life and desire to remain active in their society and families. We propose a system for enhancing social interactions of LIS patients with their families and friends with the goal of improving their overall quality of life. Our system comprises a Brain-Computer Interface (BCI), augmented-reality glasses, and a screen that shares the view of a caretaker with the patient. This setting targets both patients and caretakers: (1) it allows the patient to experience the outside world through the eyes of the caretaker and (2) it creates a way of active communication between patient and caretaker to convey needs and advice. To validate our approach, we showcased our prototype and conducted interviews that demonstrate the potential benefit for affected patients.},
comment = {faltaous2019chiea},
doi = {10.1145/3290607.3312754},
isbn = {9781450359719},
keywords = {brain-computer interaction, augmented reality, locked-in syndrome, social interaction},
location = {Glasgow, Scotland Uk},
numpages = {6},
timestamp = {2019.05.01},
url = {http://florian-alt.org/unibw/wp-content/publications/faltaous2019chiea.pdf},
}
M. Braun, A. Mainz, R. Chadowitz, B. Pfleging, and F. Alt. At Your Service: Designing Voice Assistant Personalities to Improve Automotive User Interfaces. In Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems (CHI ’19), Association for Computing Machinery, New York, NY, USA, 2019, p. 1–11. doi:10.1145/3290605.3300270
[BibTeX] [Abstract] [PDF] [Talk]
This paper investigates personalized voice characters for in-car speech interfaces. In particular, we report on how we designed different personalities for voice assistants and compared them in a real world driving study. Voice assistants have become important for a wide range of use cases, yet current interfaces are using the same style of auditory response in every situation, despite varying user needs and personalities. To close this gap, we designed four assistant personalities (Friend, Admirer, Aunt, and Butler) and compared them to a baseline (Default) in a between-subject study in real traffic conditions. Our results show higher likability and trust for assistants that correctly match the user’s personality while we observed lower likability, trust, satisfaction, and usefulness for incorrectly matched personalities, each in comparison with the Default character. We discuss design aspects for voice assistants in different automotive use cases.
@InProceedings{braun2019chi,
author = {Michael Braun AND Anja Mainz AND Ronee Chadowitz AND Bastian Pfleging AND Florian Alt},
booktitle = {{Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems}},
title = {{At Your Service: Designing Voice Assistant Personalities to Improve Automotive User Interfaces}},
year = {2019},
address = {New York, NY, USA},
note = {braun2019chi},
pages = {1--11},
publisher = {Association for Computing Machinery},
series = {CHI '19},
abstract = {This paper investigates personalized voice characters for in-car speech interfaces. In particular, we report on how we designed different personalities for voice assistants and compared them in a real world driving study. Voice assistants have become important for a wide range of use cases, yet current interfaces are using the same style of auditory response in every situation, despite varying user needs and personalities. To close this gap, we designed four assistant personalities (Friend, Admirer, Aunt, and Butler) and compared them to a baseline (Default) in a between-subject study in real traffic conditions. Our results show higher likability and trust for assistants that correctly match the user's personality while we observed lower likability, trust, satisfaction, and usefulness for incorrectly matched personalities, each in comparison with the Default character. We discuss design aspects for voice assistants in different automotive use cases.},
comment = {braun2019chi},
doi = {10.1145/3290605.3300270},
isbn = {9781450359702},
numpages = {11},
talk = {https://www.youtube.com/watch?v=OuT592PgsDQ},
timestamp = {2019.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2019chi.pdf},
}
M. Braun, J. Schubert, B. Pfleging, and F. Alt. Improving Driver Emotions with Affective Strategies. Multimodal Technologies and Interaction, vol. 3, iss. 1, 2019. doi:10.3390/mti3010021
[BibTeX] [Abstract] [PDF]
Drivers in negative emotional states, such as anger or sadness, are prone to perform badly at driving, decreasing overall road safety for all road users. Recent advances in affective computing, however, allow for the detection of such states and give us tools to tackle the connected problems within automotive user interfaces. We see potential in building a system which reacts upon possibly dangerous driver states and influences the driver in order to drive more safely. We compare different interaction approaches for an affective automotive interface, namely Ambient Light, Visual Notification, a Voice Assistant, and an Empathic Assistant. Results of a simulator study with 60 participants (30 each with induced sadness/anger) indicate that an emotional voice assistant with the ability to empathize with the user is the most promising approach as it improves negative states best and is rated most positively. Qualitative data also shows that users prefer an empathic assistant but also resent potential paternalism. This leads us to suggest that digital assistants are a valuable platform to improve driver emotions in automotive environments and thereby enable safer driving.
@Article{braun2019mti,
author = {Braun, Michael and Schubert, Jonas and Pfleging, Bastian and Alt, Florian},
journal = {{Multimodal Technologies and Interaction}},
title = {{Improving Driver Emotions with Affective Strategies}},
year = {2019},
issn = {2414-4088},
note = {braun2019mti},
number = {1},
volume = {3},
abstract = {Drivers in negative emotional states, such as anger or sadness, are prone to perform badly at driving, decreasing overall road safety for all road users. Recent advances in affective computing, however, allow for the detection of such states and give us tools to tackle the connected problems within automotive user interfaces. We see potential in building a system which reacts upon possibly dangerous driver states and influences the driver in order to drive more safely. We compare different interaction approaches for an affective automotive interface, namely Ambient Light, Visual Notification, a Voice Assistant, and an Empathic Assistant. Results of a simulator study with 60 participants (30 each with induced sadness/anger) indicate that an emotional voice assistant with the ability to empathize with the user is the most promising approach as it improves negative states best and is rated most positively. Qualitative data also shows that users prefer an empathic assistant but also resent potential paternalism. This leads us to suggest that digital assistants are a valuable platform to improve driver emotions in automotive environments and thereby enable safer driving.},
article-number = {21},
doi = {10.3390/mti3010021},
owner = {florian},
timestamp = {2019.04.16},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2019mti.pdf},
}
R. Häuslschmid, D. Ren, F. Alt, A. Butz, and T. Höllerer. Personalizing Content Presentation on Large 3D Head-Up Displays. PRESENCE: Virtual and Augmented Reality, vol. 27, iss. 1, pp. 80-106, 2019. doi:10.1162/pres_a_00315
[BibTeX] [Abstract] [PDF]
Drivers’ urge to access content on smartphones while driving causes a high number of fatal accidents every year. We explore 3D full-windshield size head-up displays as an opportunity to present such content in a safer manner. In particular, we look into how drivers would personalize such displays and whether it can be considered safe. Firstly, by means of an online survey we identify types of content users access on their smartphones while driving and whether users are interested in the same content on a head-up display. Secondly, we let drivers design personalized 3D layouts and assess how personalization impacts on driving safety. Thirdly, we compare personalized layouts to a one-fits-all layout concept in a 3D driving simulator study regarding safety. We found that drivers’ content preferences diverge largely and that most of the personalized layouts do not respect safety sufficiently. The one-fits-all layout led to a better response performance but needs to be modified to consider the drivers’ preferences. We discuss the implications of the presented research on road safety and future 3D information placement on head-up displays.
@Article{haeuslschmid2019mti,
author = {H\"{a}uslschmid, Renate and Ren, Donghao and Alt, Florian and Butz, Andreas and H\"{o}llerer, Tobias},
journal = {{PRESENCE: Virtual and Augmented Reality}},
title = {{Personalizing Content Presentation on Large 3D Head-Up Displays}},
year = {2019},
note = {haeuslschmid2019mti},
number = {1},
pages = {80-106},
volume = {27},
abstract = {Drivers' urge to access content on smartphones while driving causes a high number of fatal accidents every year. We explore 3D full-windshield size head-up displays as an opportunity to present such content in a safer manner. In particular, we look into how drivers would personalize such displays and whether it can be considered safe. Firstly, by means of an online survey we identify types of content users access on their smartphones while driving and whether users are interested in the same content on a head-up display. Secondly, we let drivers design personalized 3D layouts and assess how personalization impacts on driving safety. Thirdly, we compare personalized layouts to a one-fits-all layout concept in a 3D driving simulator study regarding safety. We found that drivers' content preferences diverge largely and that most of the personalized layouts do not respect safety sufficiently. The one-fits-all layout led to a better response performance but needs to be modified to consider the drivers' preferences. We discuss the implications of the presented research on road safety and future 3D information placement on head-up displays.},
doi = {10.1162/pres_a_00315},
eprint = {https://www.mitpressjournals.org/doi/pdf/10.1162/pres_a_00315},
owner = {florian},
timestamp = {2019.04.16},
url = {http://www.florian-alt.org/unibw/wp-content/publications/haeuslschmid2019mti.pdf},
}
M. Braun, N. Broy, B. Pfleging, and F. Alt. Visualizing natural language interaction for conversational in-vehicle information systems to minimize driver distraction. Journal on Multimodal User Interfaces, vol. 13, iss. 2, p. 71–88, 2019. doi:10.1007/s12193-019-00301-2
[BibTeX] [Abstract] [PDF]
In this paper we investigate how natural language interfaces can be integrated with cars in a way such that their influence on driving performance is being minimized. In particular, we focus on how speech-based interaction can be supported through a visualization of the conversation. Our work is motivated by the fact that speech interfaces (like Alexa, Siri, Cortana, etc.) are increasingly finding their way into our everyday life. We expect such interfaces to become commonplace in vehicles in the future. Cars are a challenging environment, since speech interaction here is a secondary task that should not negatively affect the primary task, that is driving. At the outset of our work, we identify the design space for such interfaces. We then compare different visualization concepts in a driving simulator study with 64 participants. Our results yield that (1) text summaries support drivers in recalling information and enhance user experience but can also increase distraction, (2) the use of keywords minimizes cognitive load and influence on driving performance, and (3) the use of icons increases the attractiveness of the interface.
@Article{braun2019JMUI,
author = {Braun, Michael and Broy, Nora and Pfleging, Bastian and Alt, Florian},
journal = {{Journal on Multimodal User Interfaces}},
title = {{Visualizing natural language interaction for conversational in-vehicle information systems to minimize driver distraction}},
year = {2019},
issn = {1783-8738},
month = jun,
note = {braun2019JMUI},
number = {2},
pages = {71--88},
volume = {13},
abstract = {In this paper we investigate how natural language interfaces can be integrated with cars in a way that minimizes their influence on driving performance. In particular, we focus on how speech-based interaction can be supported through a visualization of the conversation. Our work is motivated by the fact that speech interfaces (like Alexa, Siri, Cortana, etc.) are increasingly finding their way into our everyday lives. We expect such interfaces to become commonplace in vehicles in the future. Cars are a challenging environment, since speech interaction here is a secondary task that should not negatively affect the primary task, that is, driving. At the outset of our work, we identify the design space for such interfaces. We then compare different visualization concepts in a driving simulator study with 64 participants. Our results yield that (1) text summaries support drivers in recalling information and enhance user experience but can also increase distraction, (2) the use of keywords minimizes cognitive load and influence on driving performance, and (3) the use of icons increases the attractiveness of the interface.},
day = {01},
doi = {10.1007/s12193-019-00301-2},
owner = {florian},
timestamp = {2019.04.16},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2019JMUI.pdf},
}
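Finding (2) above — that keyword displays minimize cognitive load — translates naturally into a small sketch. The following Python snippet is purely illustrative and assumes a naive stop-word filter and a length-based salience heuristic of our own; it is not the visualization pipeline from the paper:

# Reduce a spoken system response to a few display keywords.
# Stop-word list and salience scoring are simplistic placeholders.
STOPWORDS = {"the", "a", "an", "is", "to", "of", "and", "in", "for", "on", "my"}

def keywords(utterance, k=3):
    words = [w.strip(".,!?").lower() for w in utterance.split()]
    candidates = [w for w in words if w not in STOPWORDS]
    # Crude salience: prefer longer (often more content-bearing) words.
    return sorted(set(candidates), key=len, reverse=True)[:k]

print(keywords("Navigate to the nearest charging station on my route"))
# e.g. ['navigate', 'charging', 'station']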

2018

D. Buschek, M. Hassib, and F. Alt. Personal Mobile Messaging in Context: Chat Augmentations for Expressiveness and Awareness. ACM Transactions on Computer-Human Interaction (ToCHI), vol. 25, iss. 4, p. 23:1–23:33, 2018. doi:10.1145/3201404
[BibTeX] [Abstract] [PDF]
Mobile text messaging is one of the most important communication channels today, but it suffers from a lack of expressiveness, context and emotional awareness, compared to face-to-face communication. We address this problem by augmenting text messaging with information about users and contexts. We present and reflect on lessons learned from three field studies, in which we deployed augmentation concepts as prototype chat apps in users’ daily lives. We studied (1) subtly conveying context via dynamic font personalisation (TapScript), (2) integrating and sharing physiological data – namely heart rate – implicitly or explicitly (HeartChat) and (3) automatic annotation of various context cues: music, distance, weather and activities (ContextChat). Based on our studies, we discuss chat augmentation with respect to privacy concerns, understandability, connectedness and inferring context in addition to methodological lessons learned. Finally, we propose a design space for chat augmentation to guide future research, and conclude with practical design implications.
@Article{buschek2018tochi,
author = {Buschek, Daniel and Hassib, Mariam and Alt, Florian},
journal = {{ACM Transactions on Computer-Human Interaction (ToCHI)}},
title = {{Personal Mobile Messaging in Context: Chat Augmentations for Expressiveness and Awareness}},
year = {2018},
issn = {1073-0516},
month = aug,
note = {buschek2018tochi},
number = {4},
pages = {23:1--23:33},
volume = {25},
abstract = {Mobile text messaging is one of the most important communication channels today, but it suffers from a lack of expressiveness, context and emotional awareness, compared to face-to-face communication. We address this problem by augmenting text messaging with information about users and contexts. We present and reflect on lessons learned from three field studies, in which we deployed augmentation concepts as prototype chat apps in users’ daily lives. We studied (1) subtly conveying context via dynamic font personalisation (TapScript), (2) integrating and sharing physiological data – namely heart rate – implicitly or explicitly (HeartChat) and (3) automatic annotation of various context cues: music, distance, weather and activities (ContextChat). Based on our studies, we discuss chat augmentation with respect to privacy concerns, understandability, connectedness and inferring context in addition to methodological lessons learned. Finally, we propose a design space for chat augmentation to guide future research, and conclude with practical design implications.},
acmid = {3201404},
address = {New York, NY, USA},
articleno = {23},
doi = {10.1145/3201404},
issue_date = {August 2018},
keywords = {Mobile text messaging, chat context, heart rate, mobile device sensors},
numpages = {33},
publisher = {Association for Computing Machinery},
timestamp = {2019.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2018tochi.pdf},
}
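As a concrete illustration of the augmentation idea, here is a minimal sketch of a context-annotated message in the spirit of ContextChat; the sensor-probe dictionary is a hypothetical stand-in, not the prototype apps' actual API:

# Sketch: attach automatically collected context cues to outgoing messages.
from dataclasses import dataclass, field
import time

@dataclass
class AugmentedMessage:
    text: str
    sent_at: float = field(default_factory=time.time)
    context: dict = field(default_factory=dict)

def send_with_context(text, sensors):
    """sensors maps cue names to zero-argument probe functions."""
    msg = AugmentedMessage(text)
    msg.context = {name: probe() for name, probe in sensors.items()}
    return msg

# Hypothetical usage:
# msg = send_with_context("On my way!", {"activity": lambda: "walking",
#                                        "weather": lambda: "sunny"})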
M. Braun, B. Pfleging, and F. Alt. A Survey to Understand Emotional Situations on the Road and What They Mean for Affective Automotive UIs. Multimodal Technologies and Interaction, vol. 2, iss. 4, 2018. doi:10.3390/mti2040075
[BibTeX] [Abstract] [PDF]
In this paper, we present the results of an online survey (N = 170) on emotional situations on the road. In particular, we asked potential early adopters to remember a situation where they felt either an intense positive or negative emotion while driving. Our research is motivated by imminent disruptions in the automotive sector due to automated driving and the accompanying switch to selling driving experiences over horsepower. This creates a need to focus on the driver’s emotion when designing in-car interfaces. As a result of our research, we present a set of propositions for affective car interfaces based on real-life experiences. With our work we aim to support the design of affective car interfaces and give designers a foundation to build upon. We find respondents often connect positive emotions with enjoying their independence, while negative experiences are associated mostly with traffic behavior. Participants who experienced negative situations wished for better information management and a higher degree of automation. Drivers with positive emotions generally wanted to experience the situation more genuinely, for example, by switching to a “back-to-basic” mode. We explore these statements and discuss recommendations for the design of affective interfaces in future cars.
@Article{braun2018mti,
author = {Braun, Michael and Pfleging, Bastian and Alt, Florian},
journal = {{Multimodal Technologies and Interaction}},
title = {{A Survey to Understand Emotional Situations on the Road and What They Mean for Affective Automotive UIs}},
year = {2018},
issn = {2414-4088},
note = {braun2018mti},
number = {4},
volume = {2},
abstract = {In this paper, we present the results of an online survey (N = 170) on emotional situations on the road. In particular, we asked potential early adopters to remember a situation where they felt either an intense positive or negative emotion while driving. Our research is motivated by imminent disruptions in the automotive sector due to automated driving and the accompanying switch to selling driving experiences over horsepower. This creates a need to focus on the driver’s emotion when designing in-car interfaces. As a result of our research, we present a set of propositions for affective car interfaces based on real-life experiences. With our work we aim to support the design of affective car interfaces and give designers a foundation to build upon. We find respondents often connect positive emotions with enjoying their independence, while negative experiences are associated mostly with traffic behavior. Participants who experienced negative situations wished for better information management and a higher degree of automation. Drivers with positive emotions generally wanted to experience the situation more genuinely, for example, by switching to a “back-to-basic” mode. We explore these statements and discuss recommendations for the design of affective interfaces in future cars.},
address = {Basel, Switzerland},
article-number = {75},
doi = {10.3390/mti2040075},
publisher = {MDPI},
timestamp = {2018.12.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2018mti.pdf},
}
M. Braun, S. T. Völkel, G. Wiegand, T. Puls, D. Steidl, Y. Weiß, and F. Alt. The Smile is The New Like: Controlling Music with Facial Expressions to Minimize Driver Distraction. In Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia (MUM 2018), Association for Computing Machinery, New York, NY, USA, 2018, p. 383–389. doi:10.1145/3282894.3289729
[BibTeX] [Abstract] [PDF]
The control of user interfaces while driving is a textbook example of driver distraction. Modern in-car interfaces are growing in complexity and visual demand, yet they need to stay simple enough to handle while driving. One common approach to solving this problem is multimodal interfaces, incorporating, e.g., touch, speech, and mid-air gestures for the control of distinct features. This allows for an optimization of used cognitive resources and can relieve the driver of potential overload. We introduce a novel modality for in-car interaction: our system allows drivers to use facial expressions to control a music player. The results of a user study show that both implicit emotion recognition and explicit facial expressions are applicable for music control in cars. Subconscious emotion recognition could decrease distraction, while explicit expressions can be used as an alternative input modality. A simple smiling gesture showed good potential, e.g., to save favorite songs.
@InProceedings{braun18mumadj,
author = {Braun, Michael and V\"{o}lkel, Sarah Theres and Wiegand, Gesa and Puls, Thomas and Steidl, Daniel and Wei\ss, Yannick and Alt, Florian},
booktitle = {{Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{The Smile is The New Like: Controlling Music with Facial Expressions to Minimize Driver Distraction}},
year = {2018},
address = {New York, NY, USA},
note = {braun2018mumadj},
pages = {383--389},
publisher = {Association for Computing Machinery},
series = {MUM 2018},
abstract = {The control of user interfaces while driving is a textbook example of driver distraction. Modern in-car interfaces are growing in complexity and visual demand, yet they need to stay simple enough to handle while driving. One common approach to solving this problem is multimodal interfaces, incorporating, e.g., touch, speech, and mid-air gestures for the control of distinct features. This allows for an optimization of used cognitive resources and can relieve the driver of potential overload. We introduce a novel modality for in-car interaction: our system allows drivers to use facial expressions to control a music player. The results of a user study show that both implicit emotion recognition and explicit facial expressions are applicable for music control in cars. Subconscious emotion recognition could decrease distraction, while explicit expressions can be used as an alternative input modality. A simple smiling gesture showed good potential, e.g., to save favorite songs.},
acmid = {3289729},
doi = {10.1145/3282894.3289729},
isbn = {978-1-4503-6594-9},
keywords = {Affective Computing, Automotive User Interfaces, Driver Distraction, Face Recognition, Multimodal Interaction},
location = {Cairo, Egypt},
numpages = {7},
timestamp = {2018.11.28},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2018mumadj.pdf},
}
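The explicit-expression modality described above amounts to dispatching recognized expressions to player actions. The sketch below is a simplified illustration; the expression labels and the player interface are assumptions, not the study prototype:

# Sketch: map recognized facial expressions to music-player actions.
EXPRESSION_ACTIONS = {
    "smile": "save_favorite",  # 'the smile is the new like'
    "frown": "skip_track",
}

def on_expression(expression, player):
    action = EXPRESSION_ACTIONS.get(expression)
    if action is not None:
        getattr(player, action)()  # e.g., calls player.save_favorite()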
N. Müller, B. Eska, R. Schäffer, S. T. Völkel, M. Braun, G. Wiegand, and F. Alt. Arch’N’Smile: A Jump’N’Run Game Using Facial Expression Recognition Control For Entertaining Children During Car Journeys. In Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia (MUM’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 335–339. doi:10.1145/3282894.3282918
[BibTeX] [Abstract] [PDF]
Children can be a distraction to the driver during a car ride. With our work, we combine facial expression recognition in the car with a game for children. The goal is that the parents can focus on the driving task while the child is busy and entertained. We conducted a study with children and parents in a real driving situation. It turned out that children can handle and enjoy games with facial recognition controls, which leads us to the conclusion that face recognition in the car as an entertaining system for children should be developed further to exploit its full potential.
@InProceedings{mueller2018mum,
author = {M\"{u}ller, Niklas and Eska, Bettina and Sch\"{a}ffer, Richard and V\"{o}lkel, Sarah Theres and Braun, Michael and Wiegand, Gesa and Alt, Florian},
booktitle = {{Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{Arch'N'Smile: A Jump'N'Run Game Using Facial Expression Recognition Control For Entertaining Children During Car Journeys}},
year = {2018},
address = {New York, NY, USA},
note = {mueller2018mum},
pages = {335--339},
publisher = {Association for Computing Machinery},
series = {MUM'18},
abstract = {Children can be a distraction to the driver during a car ride. With our work, we combine facial expression recognition in the car with a game for children. The goal is that the parents can focus on the driving task while the child is busy and entertained. We conducted a study with children and parents in a real driving situation. It turned out that children can handle and enjoy games with facial recognition controls, which leads us to the conclusion that face recognition in the car as an entertaining system for children should be developed further to exploit its full potential.},
acmid = {3282918},
doi = {10.1145/3282894.3282918},
isbn = {978-1-4503-6594-9},
keywords = {Children, Distraction, Driving, Entertainment, Face Recognition, Facial Expression, Game},
location = {Cairo, Egypt},
numpages = {5},
timestamp = {2018.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mueller2018mum.pdf},
}
H. Drewes, M. Khamis, and F. Alt. Smooth Pursuit Target Speeds and Trajectories. In Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia (MUM’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 139–146. doi:10.1145/3282894.3282913
[BibTeX] [Abstract] [PDF]
In this paper we present an investigation of how the speed and trajectory of smooth pursuit targets impact detection rates in gaze interfaces. Previous work optimized these values for the specific application for which smooth pursuit eye movements were employed. However, this may not always be possible. For example, UI designers may want to minimize distraction caused by the stimulus, integrate it with a certain UI element (e.g., a button), or limit it to a certain area of the screen. In these cases an in-depth understanding of the interplay between speed, trajectory, and accuracy is required. To achieve this, we conducted a user study with 15 participants who had to follow targets with different speeds and on different trajectories using their gaze. We evaluated the data with respect to detectability. As a result, we obtained reasonable ranges for target speeds and demonstrate the effects of trajectory shapes. We show that slow moving targets are hard to detect by correlation and that introducing a delay improves the detection rate for fast moving targets. Our research is complemented by design rules which enable designers to implement better pursuit detectors and pursuit-based user interfaces.
@InProceedings{drewes2018mum,
author = {Drewes, Heiko and Khamis, Mohamed and Alt, Florian},
booktitle = {{Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{Smooth Pursuit Target Speeds and Trajectories}},
year = {2018},
address = {New York, NY, USA},
note = {drewes2018mum},
pages = {139--146},
publisher = {Association for Computing Machinery},
series = {MUM'18},
abstract = {In this paper we present an investigation of how the speed and trajectory of smooth pursuit targets impact detection rates in gaze interfaces. Previous work optimized these values for the specific application for which smooth pursuit eye movements were employed. However, this may not always be possible. For example, UI designers may want to minimize distraction caused by the stimulus, integrate it with a certain UI element (e.g., a button), or limit it to a certain area of the screen. In these cases an in-depth understanding of the interplay between speed, trajectory, and accuracy is required. To achieve this, we conducted a user study with 15 participants who had to follow targets with different speeds and on different trajectories using their gaze. We evaluated the data with respect to detectability. As a result, we obtained reasonable ranges for target speeds and demonstrate the effects of trajectory shapes. We show that slow moving targets are hard to detect by correlation and that introducing a delay improves the detection rate for fast moving targets. Our research is complemented by design rules which enable designers to implement better pursuit detectors and pursuit-based user interfaces.},
acmid = {3282913},
doi = {10.1145/3282894.3282913},
isbn = {978-1-4503-6594-9},
keywords = {Eye tracking, pursuit detection, smooth pursuits, trajectories},
location = {Cairo, Egypt},
numpages = {8},
timestamp = {2018.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/drewes2018mum.pdf},
}
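The correlation-based detection and the delay for fast targets mentioned in the abstract can be sketched as follows; the threshold and delay values here are hypothetical placeholders, not the design rules derived in the paper:

# Sketch: detect whether gaze follows a moving target via Pearson correlation.
import numpy as np

def pursuit_match(gaze_xy, target_xy, delay=3, threshold=0.8):
    """gaze_xy, target_xy: arrays of shape (n_samples, 2), same sampling rate.
    delay shifts the comparison to account for the eye lagging behind
    fast-moving stimuli (the paper reports a delay improves detection)."""
    g = np.asarray(gaze_xy, dtype=float)
    t = np.asarray(target_xy, dtype=float)
    if delay > 0:
        g, t = g[delay:], t[:-delay]
    # Correlate per axis; both axes must match (assumes both axes vary).
    rx = np.corrcoef(g[:, 0], t[:, 0])[0, 1]
    ry = np.corrcoef(g[:, 1], t[:, 1])[0, 1]
    return min(rx, ry) >= threshold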
L. Mecke, K. Pfeuffer, S. Prange, and F. Alt. Open Sesame!: User Perception of Physical, Biometric, and Behavioural Authentication Concepts to Open Doors. In Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia (MUM’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 153–159. doi:10.1145/3282894.3282923
[BibTeX] [Abstract] [PDF]
In usable security (e.g., smartphone authentication), a lot of emphasis is put on low-effort authentication and access concepts. Yet, only very few approaches exist where such concepts are applied beyond digital devices. We investigate and explore seamless authentication systems at doors, where most currently used systems for seamless access rely on the use of tokens. In a Wizard-of-Oz study, we investigate three different authentication schemes, namely (1) key, (2) palm vein scanner and (3) gait-based authentication (compare Fig. 1). Most participants in our study (N=15) preferred the palm vein scanner, while ranking unlocking with a key and gait-based recognition second and third. Our results suggest that recovery costs for a failed authentication attempt have an impact on user perception. Furthermore, while the participants appreciated seamless authentication via biometrics, they also valued the control they gain from the possession of a physical token.
@InProceedings{mecke2018mum,
author = {Mecke, Lukas and Pfeuffer, Ken and Prange, Sarah and Alt, Florian},
booktitle = {{Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{Open Sesame!: User Perception of Physical, Biometric, and Behavioural Authentication Concepts to Open Doors}},
year = {2018},
address = {New York, NY, USA},
note = {mecke2018mum},
pages = {153--159},
publisher = {Association for Computing Machinery},
series = {MUM'18},
abstract = {In usable security (e.g., smartphone authentication), a lot of emphasis is put on low-effort authentication and access concepts. Yet, only very few approaches exist where such concepts are applied beyond digital devices. We investigate and explore seamless authentication systems at doors, where most currently used systems for seamless access rely on the use of tokens. In a Wizard-of-Oz study, we investigate three different authentication schemes, namely (1) key, (2) palm vein scanner and (3) gait-based authentication (compare Fig. 1). Most participants in our study (N=15) preferred the palm vein scanner, while ranking unlocking with a key and gait-based recognition second and third. Our results suggest that recovery costs for a failed authentication attempt have an impact on user perception. Furthermore, while the participants appreciated seamless authentication via biometrics, they also valued the control they gain from the possession of a physical token.},
acmid = {3282923},
doi = {10.1145/3282894.3282923},
isbn = {978-1-4503-6594-9},
keywords = {(Behavioural) Biometrics, Authentication, User Perception, Wizard-of-Oz},
location = {Cairo, Egypt},
numpages = {7},
timestamp = {2018.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mecke2018mum.pdf},
}
S. Prange, D. Buschek, and F. Alt. An Exploratory Study on Correlations of Hand Size and Mobile Touch Interactions. In Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia (MUM’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 279–283. doi:10.1145/3282894.3282924
[BibTeX] [Abstract] [PDF]
We report on an exploratory study investigating the relationship of users’ hand sizes and aspects of their mobile touch interactions. Estimating hand size from interaction could inform, for example, UI adaptation, occlusion-aware UIs, and biometrics. We recorded touch data from 62 participants performing six touch tasks on a smartphone. Our results reveal considerable correlations between hand size and aspects of touch interaction, both for tasks with unrestricted “natural” postures and restricted hand locations. We discuss implications for applications and ideas for future work.
@InProceedings{prange2018mum,
author = {Prange, Sarah and Buschek, Daniel and Alt, Florian},
booktitle = {{Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{An Exploratory Study on Correlations of Hand Size and Mobile Touch Interactions}},
year = {2018},
address = {New York, NY, USA},
note = {prange2018mum},
pages = {279--283},
publisher = {Association for Computing Machinery},
series = {MUM'18},
abstract = {We report on an exploratory study investigating the relationship of users' hand sizes and aspects of their mobile touch interactions. Estimating hand size from interaction could inform, for example, UI adaptation, occlusion-aware UIs, and biometrics. We recorded touch data from 62 participants performing six touch tasks on a smartphone. Our results reveal considerable correlations between hand size and aspects of touch interaction, both for tasks with unrestricted "natural" postures and restricted hand locations. We discuss implications for applications and ideas for future work.},
acmid = {3282924},
doi = {10.1145/3282894.3282924},
isbn = {978-1-4503-6594-9},
keywords = {Correlation, Hand Size, Scrolling, Swiping, Targeting, Touch},
location = {Cairo, Egypt},
numpages = {5},
timestamp = {2018.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/prange2018mum.pdf},
}
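Since the study's analysis is correlational, a minimal sketch conveys the method; the hand-size values and the swipe-length feature below are made-up illustration data, not the study's dataset:

# Sketch: correlate hand size with a touch feature (here: swipe length).
import numpy as np

hand_size_mm = np.array([170.0, 182.0, 175.0, 190.0, 168.0, 185.0])
swipe_len_px = np.array([410.0, 465.0, 430.0, 498.0, 402.0, 471.0])

r = np.corrcoef(hand_size_mm, swipe_len_px)[0, 1]
print(f"Pearson r = {r:.2f}")  # a sizable r would mirror the paper's
                               # 'considerable correlations' finding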
C. Mai, T. Wiltzius, F. Alt, and H. Hußmann. Feeling Alone in Public: Investigating the Influence of Spatial Layout on Users’ VR Experience. In Proceedings of the 10th Nordic Conference on Human-Computer Interaction (NordiCHI ’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 286–298. doi:10.1145/3240167.3240200
[BibTeX] [Abstract] [PDF]
We investigate how spatial layout in public environments like workplaces, fairs, or conferences influences a user’s VR experience. In particular, we compare environments in which an HMD user is (a) surrounded by other people, (b) physically separated by a barrier, or (c) in a separate room. In contrast to lab environments, users in public environments are affected by physical threats (for example, other people in the space running into them) but also cognitive threats (for example, not knowing what happens in the real world), as known from research on proxemics or social facilitation. We contribute an extensive discussion of the factors influencing a user’s VR experience in public. Based on this we conducted a between-subject design user study (N=58) to understand the differences between the three environments. As a result, we present implications regarding (1) spatial layout, (2) behavior of the VR system operator, and (3) the VR experience, which help both HCI researchers and practitioners enhance users’ VR experience in public environments.
@InProceedings{mai2018nordichi,
author = {Mai, Christian and Wiltzius, Tim and Alt, Florian and Hu\ss{}mann, Heinrich},
booktitle = {{Proceedings of the 10th Nordic Conference on Human-Computer Interaction}},
title = {{Feeling Alone in Public: Investigating the Influence of Spatial Layout on Users' VR Experience}},
year = {2018},
address = {New York, NY, USA},
note = {mai2018nordichi},
pages = {286–298},
publisher = {Association for Computing Machinery},
series = {NordiCHI '18},
abstract = {We investigate how spatial layout in public environments like workplaces, fairs, or conferences influences a user's VR experience. In particular, we compare environments in which an HMD user is (a) surrounded by other people, (b) physically separated by a barrier, or (c) in a separate room. In contrast to lab environments, users in public environments are affected by physical threats (for example, other people in the space running into them) but also cognitive threats (for example, not knowing what happens in the real world), as known from research on proxemics or social facilitation. We contribute an extensive discussion of the factors influencing a user's VR experience in public. Based on this we conducted a between-subject design user study (N=58) to understand the differences between the three environments. As a result, we present implications regarding (1) spatial layout, (2) behavior of the VR system operator, and (3) the VR experience, which help both HCI researchers and practitioners enhance users' VR experience in public environments.},
doi = {10.1145/3240167.3240200},
isbn = {9781450364379},
keywords = {head-mounted displays, public spaces, user experience, virtual reality},
location = {Oslo, Norway},
numpages = {13},
timestamp = {2018.10.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mai2018nordichi.pdf},
}
M. Braun, S. Weiser, B. Pfleging, and F. Alt. A Comparison of Emotion Elicitation Methods for Affective Driving Studies. In Proceedings of the 10th International Conference on Automotive User Interfaces and Interactive Vehicular Applications (AutomotiveUI ’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 77–81. doi:10.1145/3239092.3265945
[BibTeX] [Abstract] [PDF]
Advances in sensing technology enable the emotional state of car drivers to be captured and interfaces to be built that respond to these emotions. To evaluate such emotion-aware interfaces, researchers need to evoke certain emotional states within participants. Emotion elicitation in driving studies poses a challenge as the driving task can interfere with the elicitation task. Induced emotions also lose intensity with time and through secondary tasks. This is why we have analyzed different emotion elicitation techniques for their suitability in automotive research and compared the most promising approaches in a user study. We recommend using autobiographical recollection to induce emotions in driving studies, and suggest a way to prolong emotional states with music playback. We discuss experiences from a driving simulator study, including solutions for addressing potential privacy issues.
@InProceedings{braun2018autouiadj1,
author = {Braun, Michael and Weiser, Simon and Pfleging, Bastian and Alt, Florian},
booktitle = {{Proceedings of the 10th International Conference on Automotive User Interfaces and Interactive Vehicular Applications}},
title = {{A Comparison of Emotion Elicitation Methods for Affective Driving Studies}},
year = {2018},
address = {New York, NY, USA},
note = {braun2018autouiadj1},
pages = {77--81},
publisher = {Association for Computing Machinery},
series = {AutomotiveUI '18},
abstract = {Advances in sensing technology enable the emotional state of car drivers to be captured and interfaces to be built that respond to these emotions. To evaluate such emotion-aware interfaces, researchers need to evoke certain emotional states within participants. Emotion elicitation in driving studies poses a challenge as the driving task can interfere with the elicitation task. Induced emotions also lose intensity with time and through secondary tasks. This is why we have analyzed different emotion elicitation techniques for their suitability in automotive research and compared the most promising approaches in a user study. We recommend using autobiographical recollection to induce emotions in driving studies, and suggest a way to prolong emotional states with music playback. We discuss experiences from a driving simulator study, including solutions for addressing potential privacy issues.},
acmid = {3265945},
doi = {10.1145/3239092.3265945},
isbn = {978-1-4503-5947-4},
keywords = {Affective Computing, Driving Studies, Emotion Elicitation},
location = {Toronto, ON, Canada},
numpages = {5},
timestamp = {2018.10.05},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2018autouiadj1.pdf},
}
M. Braun, F. Roider, F. Alt, and T. Gross. Automotive Research in the Public Space: Towards Deployment-Based Prototypes For Real Users. In Proceedings of the 10th International Conference on Automotive User Interfaces and Interactive Vehicular Applications (AutomotiveUI ’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 181–185. doi:10.1145/3239092.3265964
[BibTeX] [Abstract] [PDF]
Many automotive user studies allow users to experience and evaluate interactive concepts. They are, however, often limited to small and specific groups of participants, such as students or experts. This might limit the generalizability of results for future users. A possible solution is to allow a large group of unbiased users to actively experience an interactive prototype and generate new ideas, but there is little experience about the realization and benefits of such an approach. We placed an interactive prototype in a public space and gathered objective and subjective data from 693 participants over the course of three months. We found a high variance in data quality and identified resulting restrictions for suitable research questions. This results in concrete requirements for hardware, software, and analytics, e.g., the need to assess data quality. We give examples of how this approach lets users explore a system and provide first-contact feedback, which differs greatly from common in-depth expert analyses.
@InProceedings{braun2018autouiadj2,
author = {Braun, Michael and Roider, Florian and Alt, Florian and Gross, Tom},
booktitle = {{Proceedings of the 10th International Conference on Automotive User Interfaces and Interactive Vehicular Applications}},
title = {{Automotive Research in the Public Space: Towards Deployment-Based Prototypes For Real Users}},
year = {2018},
address = {New York, NY, USA},
note = {braun2018autouiadj2},
pages = {181--185},
publisher = {Association for Computing Machinery},
series = {AutomotiveUI '18},
abstract = {Many automotive user studies allow users to experience and evaluate interactive concepts. They are, however, often limited to small and specific groups of participants, such as students or experts. This might limit the generalizability of results for future users. A possible solution is to allow a large group of unbiased users to actively experience an interactive prototype and generate new ideas, but there is little experience about the realization and benefits of such an approach. We placed an interactive prototype in a public space and gathered objective and subjective data from 693 participants over the course of three months. We found a high variance in data quality and identified resulting restrictions for suitable research questions. This results in concrete requirements for hardware, software, and analytics, e.g., the need to assess data quality. We give examples of how this approach lets users explore a system and provide first-contact feedback, which differs greatly from common in-depth expert analyses.},
acmid = {3265964},
doi = {10.1145/3239092.3265964},
isbn = {978-1-4503-5947-4},
keywords = {Automotive UI, Deployment, Prototypes, User Studies},
location = {Toronto, ON, Canada},
numpages = {5},
timestamp = {2018.10.05},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2018autouiadj2.pdf},
}
M. Braun, A. Frison, S. T. Völkel, F. Alt, H. Hussmann, and A. Riener. Beyond Transportation: How to Keep Users Attached When They Are Neither Driving nor Owning Automated Cars?. In Proceedings of the 10th International Conference on Automotive User Interfaces and Interactive Vehicular Applications (AutomotiveUI ’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 175–180. doi:10.1145/3239092.3265963
[BibTeX] [Abstract] [PDF]
The way drivers relate to cars is likely bound to change with the rise of automated vehicles and new ownership models. However, personal relationships towards products are an important part of buying decisions. Car manufacturers thus need to provide novel bonding experiences for their future customers in order to stay competitive. We introduce a vehicle attachment model based on related work from other domains. In interviews with 16 car owners we verify the approach as promising and derive four attachment types by applying the model: interviewees’ personal attachments were grounded on either self-empowering reasons, memories with the car, increased status, or a loving friendship towards their car. We propose how to address the needs of these four attachment types as a first step towards emotionally irreplaceable automated and shared vehicles.
@InProceedings{braun2018autouiadj3,
author = {Braun, Michael and Frison, Anna-Katharina and V\"{o}lkel, Sarah Theres and Alt, Florian and Hussmann, Heinrich and Riener, Andreas},
booktitle = {{Proceedings of the 10th International Conference on Automotive User Interfaces and Interactive Vehicular Applications}},
title = {{Beyond Transportation: How to Keep Users Attached When They Are Neither Driving nor Owning Automated Cars?}},
year = {2018},
address = {New York, NY, USA},
note = {braun2018autouiadj3},
pages = {175--180},
publisher = {Association for Computing Machinery},
series = {AutomotiveUI '18},
abstract = {The way drivers relate to cars is likely bound to change with the rise of automated vehicles and new ownership models. However, personal relationships towards products are an important part of buying decisions. Car manufacturers thus need to provide novel bonding experiences for their future customers in order to stay competitive. We introduce a vehicle attachment model based on related work from other domains. In interviews with 16 car owners we verify the approach as promising and derive four attachment types by applying the model: interviewees' personal attachments were grounded on either self-empowering reasons, memories with the car, increased status, or a loving friendship towards their car. We propose how to address the needs of these four attachment types as a first step towards emotionally irreplaceable automated and shared vehicles.},
acmid = {3265963},
doi = {10.1145/3239092.3265963},
isbn = {978-1-4503-5947-4},
keywords = {Automated Cars, Car Sharing, Vehicle Attachment},
location = {Toronto, ON, Canada},
numpages = {6},
timestamp = {2018.10.05},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2018autouiadj3.pdf},
}
M. Kattenbeck, M. A. Kilian, M. Ferstl, F. Alt, and B. Ludwig. Airbot: Using a Work Flow Model for Proactive Assistance in Public Spaces. In Proceedings of the 20th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct (MobileHCI ’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 213–220. doi:10.1145/3236112.3236142
[BibTeX] [PDF]
@InProceedings{kattenbeck2018mobilehciadj,
author = {Kattenbeck, Markus and Kilian, Melanie A. and Ferstl, Matthias and Alt, Florian and Ludwig, Bernd},
booktitle = {{Proceedings of the 20th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct}},
title = {{Airbot: Using a Work Flow Model for Proactive Assistance in Public Spaces}},
year = {2018},
address = {New York, NY, USA},
note = {kattenbeck2018mobilehciadj},
pages = {213--220},
publisher = {Association for Computing Machinery},
series = {MobileHCI '18},
acmid = {3236142},
doi = {10.1145/3236112.3236142},
isbn = {978-1-4503-5941-2},
keywords = {assistance system, cooperative problem solving, human-computer interaction, mobile information needs},
location = {Barcelona, Spain},
numpages = {8},
timestamp = {2018.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/kattenbeck2018mobilehciadj},
}
R. Poguntke, C. Tasci, O. Korhonen, F. Alt, and S. Schneegass. AVotar: Exploring Personalized Avatars for Mobile Interaction with Public Displays. In Proceedings of the 20th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct (MobileHCI ’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 1–8. doi:10.1145/3236112.3236113
[BibTeX] [Abstract] [PDF]
Engaging users with public displays has been a major challenge in public display research. Interactive displays often suffer from being ignored by potential users. Research showed that user representations are a valid way to partially address this challenge, e.g., by attracting attention, conveying interactivity, and serving as entry points to gestures and touch interaction. We believe that user representations, particularly personalized avatars, could further increase the attractiveness of public displays, if carefully designed. In this work, we provide first insights on how such avatars can be designed and which properties are important for users. In particular, we present AVotar, a voting application for mobiles that lets users design avatars that are then used to represent them. In a user study we found that users appreciate high degrees of freedom in customization and focus on expressive facial features. Finally, we discuss the findings, yielding useful implications for designers of future public display applications employing avatars.
@InProceedings{poguntke2018mobilehciadj,
author = {Poguntke, Romina and Tasci, Cagri and Korhonen, Olli and Alt, Florian and Schneegass, Stefan},
booktitle = {{Proceedings of the 20th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct}},
title = {{AVotar: Exploring Personalized Avatars for Mobile Interaction with Public Displays}},
year = {2018},
address = {New York, NY, USA},
note = {poguntke2018mobilehciadj},
pages = {1--8},
publisher = {Association for Computing Machinery},
series = {MobileHCI '18},
abstract = {Engaging users with public displays has been a major challenge in public display research. Interactive displays often suffer from being ignored by potential users. Research showed that user representations are a valid way to partially address this challenge, e.g., by attracting attention, conveying interactivity, and serving as entry points to gestures and touch interaction. We believe that user representations, particularly personalized avatars, could further increase the attractiveness of public displays, if carefully designed. In this work, we provide first insights on how such avatars can be designed and which properties are important for users. In particular, we present AVotar, a voting application for mobiles that lets users design avatars that are then used to represent them. In a user study we found that users appreciate high degrees of freedom in customization and focus on expressive facial features. Finally, we discuss the findings, yielding useful implications for designers of future public display applications employing avatars.},
acmid = {3236113},
doi = {10.1145/3236112.3236113},
isbn = {978-1-4503-5941-2},
keywords = {avatars, engagement, personalization, public displays, user representation},
location = {Barcelona, Spain},
numpages = {8},
timestamp = {2018.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/poguntke2018mobilehciadj},
}
M. Khamis, L. Trotter, V. Mäkelä, E. von Zezschwitz, J. Le, A. Bulling, and F. Alt. CueAuth: Comparing Touch, Mid-Air Gestures, and Gaze for Cue-based Authentication on Situated Displays. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT), vol. 2, iss. 4, p. 174:1–174:22, 2018. doi:10.1145/3287052
[BibTeX] [Abstract] [PDF]
Secure authentication on situated displays (e.g., to access sensitive information or to make purchases) is becoming increasingly important. A promising approach to resist shoulder surfing attacks is to employ cues that users respond to while authenticating; this overwhelms observers by requiring them to observe both the cue itself as well as users’ response to the cue. Although previous work proposed a variety of modalities, such as gaze and mid-air gestures, to further improve security, an understanding of how they compare with regard to usability and security is still missing as of today. In this paper, we rigorously compare modalities for cue-based authentication on situated displays. In particular, we provide the first comparison between touch, mid-air gestures, and calibration-free gaze using a state-of-the-art authentication concept. In two in-depth user studies (N=20, N=17) we found that the choice of touch or gaze presents a clear tradeoff between usability and security. For example, while gaze input is more secure, it is also more demanding and requires longer authentication times. Mid-air gestures are slightly slower and more secure than touch but users hesitate to use them in public. We conclude with three significant design implications for authentication using touch, mid-air gestures, and gaze and discuss how the choice of modality creates opportunities and challenges for improved authentication in public.
@Article{khamis2018imwut2,
author = {Khamis, Mohamed and Trotter, Ludwig and M\"{a}kel\"{a}, Ville and Zezschwitz, Emanuel von and Le, Jens and Bulling, Andreas and Alt, Florian},
journal = {{Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT)}},
title = {{CueAuth: Comparing Touch, Mid-Air Gestures, and Gaze for Cue-based Authentication on Situated Displays}},
year = {2018},
issn = {2474-9567},
month = dec,
note = {khamis2018imwut},
number = {4},
pages = {174:1--174:22},
volume = {2},
abstract = {Secure authentication on situated displays (e.g., to access sensitive information or to make purchases) is becoming increasingly important. A promising approach to resist shoulder surfing attacks is to employ cues that users respond to while authenticating; this overwhelms observers by requiring them to observe both the cue itself as well as users' response to the cue. Although previous work proposed a variety of modalities, such as gaze and mid-air gestures, to further improve security, an understanding of how they compare with regard to usability and security is still missing as of today. In this paper, we rigorously compare modalities for cue-based authentication on situated displays. In particular, we provide the first comparison between touch, mid-air gestures, and calibration-free gaze using a state-of-the-art authentication concept. In two in-depth user studies (N=20, N=17) we found that the choice of touch or gaze presents a clear tradeoff between usability and security. For example, while gaze input is more secure, it is also more demanding and requires longer authentication times. Mid-air gestures are slightly slower and more secure than touch but users hesitate to use them in public. We conclude with three significant design implications for authentication using touch, mid-air gestures, and gaze and discuss how the choice of modality creates opportunities and challenges for improved authentication in public.},
acmid = {3287052},
address = {New York, NY, USA},
articleno = {174},
doi = {10.1145/3287052},
issue_date = {December 2018},
keywords = {Eye Tracking, Privacy, Public Displays, Pursuits, SwiPIN},
numpages = {22},
publisher = {Association for Computing Machinery},
timestamp = {2018.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2018imwut.pdf},
}
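The cue-based principle — an observer must capture both the randomized cue layout and the user's response — can be illustrated with a toy PIN scheme. The cue set and flow below are simplified assumptions, not the SwiPIN-style concept evaluated in the paper:

# Toy sketch of cue-based PIN entry: every round, each digit is randomly
# assigned a directional cue; the user answers with the cue of their next
# PIN digit instead of the digit itself.
import random

CUES = ["up", "down", "left", "right"]

def authenticate(pin, read_response):
    for digit in pin:
        layout = {d: random.choice(CUES) for d in "0123456789"}
        # A real system would now display `layout` next to the digits.
        if read_response(layout) != layout[digit]:
            return False
    return True

# Simulated legitimate user who knows the PIN:
pin = "2580"
it = iter(pin)
print(authenticate(pin, lambda layout: layout[next(it)]))  # True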
M. Khamis, A. Kienle, F. Alt, and A. Bulling. GazeDrone: Mobile Eye-Based Interaction in Public Space Without Augmenting the User. In 4th ACM Workshop on Micro Aerial Vehicle Networks, Systems, and Applications (DroNet’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 66–71. doi:10.1145/3213526.3213539
[BibTeX] [Abstract] [PDF]
Gaze interaction holds a lot of promise for seamless human-computer interaction. At the same time, current wearable mobile eye trackers require user augmentation that negatively impacts natural user behavior while remote trackers require users to position themselves within a confined tracking range. We present GazeDrone, the first system that combines a camera-equipped aerial drone with a computational method to detect sidelong glances for spontaneous (calibration-free) gaze-based interaction with surrounding pervasive systems (e.g., public displays). GazeDrone does not require augmenting each user with on-body sensors and allows interaction from arbitrary positions, even while moving. We demonstrate that drone-supported gaze interaction is feasible and accurate for certain movement types. It is well-perceived by users, in particular while interacting from a fixed position as well as while moving orthogonally or diagonally to a display. We present design implications and discuss opportunities and challenges for drone-supported gaze interaction in public.
@InProceedings{khamis2018dronet,
author = {Mohamed Khamis and Anna Kienle and Florian Alt and Andreas Bulling},
booktitle = {{4th ACM Workshop on Micro Aerial Vehicle Networks, Systems, and Applications}},
title = {{GazeDrone: Mobile Eye-Based Interaction in Public Space Without Augmenting the User}},
year = {2018},
address = {New York, NY, USA},
month = {June},
note = {khamis2018dronet},
pages = {66--71},
publisher = {Association for Computing Machinery},
series = {DroNet'18},
abstract = {Gaze interaction holds a lot of promise for seamless human-computer interaction. At the same time, current wearable mobile eye trackers require user augmentation that negatively impacts natural user behavior while remote trackers require users to position themselves within a confined tracking range. We present GazeDrone, the first system that combines a camera-equipped aerial drone with a computational method to detect sidelong glances for spontaneous (calibration-free) gaze-based interaction with surrounding pervasive systems (e.g., public displays). GazeDrone does not require augmenting each user with on-body sensors and allows interaction from arbitrary positions, even while moving. We demonstrate that drone-supported gaze interaction is feasible and accurate for certain movement types. It is well-perceived by users, in particular while interacting from a fixed position as well as while moving orthogonally or diagonally to a display. We present design implications and discuss opportunities and challenges for drone-supported gaze interaction in public.},
doi = {10.1145/3213526.3213539},
keywords = {Active eye tracking, drones, gaze interaction, UAV.},
timestamp = {2018.09.05},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2018dronet},
}
M. Khamis, F. Alt, and A. Bulling. The Past, Present, and Future of Gaze-enabled Handheld Mobile Devices: Survey and Lessons Learned. In Proceedings of the 20th International Conference on Human-Computer Interaction with Mobile Devices and Services (MobileHCI ’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 38:1–38:17. doi:10.1145/3229434.3229452
[BibTeX] [Abstract] [PDF]
While first-generation mobile gaze interfaces required special-purpose hardware, recent advances in computational gaze estimation and the availability of sensor-rich and powerful devices are finally fulfilling the promise of pervasive eye tracking and eye-based interaction on off-the-shelf mobile devices. This work provides the first holistic view of the past, present, and future of eye tracking on handheld mobile devices. To this end, we discuss how research developed from building hardware prototypes, to accurate gaze estimation on unmodified smartphones and tablets. We then discuss implications by laying out 1) novel opportunities, including pervasive advertising and conducting in-the-wild eye tracking studies on handhelds, and 2) new challenges that require further research, such as visibility of the user’s eyes, lighting conditions, and privacy implications. We discuss how these developments shape MobileHCI research in the future, possibly the next 20 years.
@InProceedings{khamis2018mobilehci,
author = {Khamis, Mohamed and Alt, Florian and Bulling, Andreas},
booktitle = {{Proceedings of the 20th International Conference on Human-Computer Interaction with Mobile Devices and Services}},
title = {{The Past, Present, and Future of Gaze-enabled Handheld Mobile Devices: Survey and Lessons Learned}},
year = {2018},
address = {New York, NY, USA},
note = {khamis2018mobilehci},
pages = {38:1--38:17},
publisher = {Association for Computing Machinery},
series = {MobileHCI '18},
abstract = {While first-generation mobile gaze interfaces required special-purpose hardware, recent advances in computational gaze estimation and the availability of sensor-rich and powerful devices are finally fulfilling the promise of pervasive eye tracking and eye-based interaction on off-the-shelf mobile devices. This work provides the first holistic view of the past, present, and future of eye tracking on handheld mobile devices. To this end, we discuss how research developed from building hardware prototypes, to accurate gaze estimation on unmodified smartphones and tablets. We then discuss implications by laying out 1) novel opportunities, including pervasive advertising and conducting in-the-wild eye tracking studies on handhelds, and 2) new challenges that require further research, such as visibility of the user's eyes, lighting conditions, and privacy implications. We discuss how these developments shape MobileHCI research in the future, possibly the next 20 years.},
acmid = {3229452},
articleno = {38},
doi = {10.1145/3229434.3229452},
isbn = {978-1-4503-5898-9},
keywords = {eye tracking, gaze estimation, gaze interaction, mobile devices, smartphones, tablets},
location = {Barcelona, Spain},
numpages = {17},
timestamp = {2018.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2018mobilehci.pdf},
}
L. Mecke, S. Prange, D. Buschek, M. Khamis, M. Hassib, and F. Alt. “Outsourcing” Security: Supporting People to Support Older Adults. In Proceedings of the MobileHCI ’18 Workshop on Mobile Privacy and Security for an Aging Population, 2018.
[BibTeX] [Abstract] [PDF]
Older adults often rely on the support of trusted individuals (e.g., younger family members) when performing complex tasks on their mobile devices, such as configuring privacy settings. However, a prominent problem is that systems are designed with the intention of a single “main user” using them, with little to no support for cases where the user would like to get external help from others. In this work, we provide anecdotal evidence of problems faced by supporters who try to help older adults in privacy and security related tasks. We outline multiple suggestions for future work in this area, and discuss how systems can support people who support older adults.
@InProceedings{mecke2018mobilehciadj,
author = {Lukas Mecke AND Sarah Prange AND Daniel Buschek AND Mohamed Khamis AND Mariam Hassib AND Florian Alt},
booktitle = {{Proceedings of the MobileHCI ’18 Workshop on Mobile Privacy and Security for an Aging Population}},
title = {{“Outsourcing” Security: Supporting People to Support Older Adults}},
year = {2018},
note = {mecke2018mobilehciadj},
abstract = {Older adults often rely on the support of trusted individuals (e.g., younger family members) when performing complex tasks on their mobile devices, such as configuring privacy settings. However, a prominent problem is that systems are designed with the intention of a single “main user” using them, with little to no support for cases where the user would like to get external help from others. In this work, we provide anecdotal evidence of problems faced by supporters who try to help older adults in privacy and security related tasks. We outline multiple suggestions for future work in this area, and discuss how systems can support people who support older adults.},
owner = {florian},
timestamp = {2018.08.31},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mecke2018mobilehciadj.pdf},
}
T. Mattusch, M. Mirzamohammad, M. Khamis, A. Bulling, and F. Alt. Hidden Pursuits: Evaluating Gaze-selection via Pursuits when the Stimuli’s Trajectory is Partially Hidden. In Proceedings of the 2018 ACM Symposium on Eye Tracking Research & Applications (ETRA ’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 27:1–27:5. doi:10.1145/3204493.3204569
[BibTeX] [Abstract] [PDF]
The idea behind gaze interaction using Pursuits is to leverage the human’s smooth pursuit eye movements performed when following moving targets. However, humans can also anticipate where a moving target would reappear if it temporarily hides from their view. In this work, we investigate how well users can select targets using Pursuits in cases where the target’s trajectory is partially invisible (HiddenPursuits): e.g., can users select a moving target that temporarily hides behind another object? Although HiddenPursuits was not studied in the context of interaction before, understanding how well users can perform HiddenPursuits presents numerous opportunities, particularly for small interfaces where a target’s trajectory can cover an area outside of the screen. We found that users can still select targets quickly via Pursuits even if their trajectory is up to 50% hidden, albeit at the expense of longer selection times when the hidden portion is larger. We discuss how gaze-based interfaces can leverage HiddenPursuits for an improved user experience.
@InProceedings{mattusch2018etra,
author = {Mattusch, Thomas and Mirzamohammad, Mahsa and Khamis, Mohamed and Bulling, Andreas and Alt, Florian},
booktitle = {{Proceedings of the 2018 ACM Symposium on Eye Tracking Research \& Applications}},
title = {{Hidden Pursuits: Evaluating Gaze-selection via Pursuits when the Stimuli's Trajectory is Partially Hidden}},
year = {2018},
address = {New York, NY, USA},
note = {mattusch2018etra},
pages = {27:1--27:5},
publisher = {Association for Computing Machinery},
series = {ETRA '18},
abstract = {The idea behind gaze interaction using Pursuits is to leverage the human's smooth pursuit eye movements performed when following moving targets. However, humans can also anticipate where a moving target would reappear if it temporarily hides from their view. In this work, we investigate how well users can select targets using Pursuits in cases where the target's trajectory is partially invisible (HiddenPursuits): e.g., can users select a moving target that temporarily hides behind another object? Although HiddenPursuits was not studied in the context of interaction before, understanding how well users can perform HiddenPursuits presents numerous opportunities, particularly for small interfaces where a target's trajectory can cover an area outside of the screen. We found that users can still select targets quickly via Pursuits even if their trajectory is up to 50% hidden, albeit at the expense of longer selection times when the hidden portion is larger. We discuss how gaze-based interfaces can leverage HiddenPursuits for an improved user experience.},
acmid = {3204569},
articleno = {27},
doi = {10.1145/3204493.3204569},
isbn = {978-1-4503-5706-7},
keywords = {displays, hidden trajectory, motion correlation, smooth pursuit},
location = {Warsaw, Poland},
numpages = {5},
timestamp = {2018.06.06},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mattusch2018etra.pdf},
}
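The HiddenPursuits question can be phrased as: does gaze still correlate with the full, known trajectory (anticipation included) when only part of it was visible? Below is a didactic sketch with hypothetical thresholds, not the paper's analysis:

# Sketch: compare correlation over the full trajectory vs. visible samples.
import numpy as np

def axis_correlation(g, t):
    return min(np.corrcoef(g[:, 0], t[:, 0])[0, 1],
               np.corrcoef(g[:, 1], t[:, 1])[0, 1])

def match_full_vs_visible(gaze, target, visible, threshold=0.75):
    """visible: boolean mask marking samples where the target was on screen."""
    g = np.asarray(gaze, dtype=float)
    t = np.asarray(target, dtype=float)
    v = np.asarray(visible, dtype=bool)
    full_r = axis_correlation(g, t)  # includes anticipated, hidden portions
    vis_r = axis_correlation(g[v], t[v]) if v.sum() >= 2 else float("nan")
    return full_r >= threshold, vis_r >= threshold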
A. Colley, J. Häkkilä, M. Forsman, B. Pfleging, and F. Alt. Car Exterior Surface Displays: Exploration in a Real-World Context. In Proceedings of the 2018 ACM International Symposium on Pervasive Displays (PerDis ’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 7:1–7:8. doi:10.1145/3205873.3205880
[BibTeX] [Abstract] [PDF]
Current changes in the automotive industry towards autonomous vehicles will spur wide ranging changes in the roles of cars in urban environments. When combined with advances in display technology, this creates potential for the outer surfaces of cars to act as public displays. We present a real-world, in-context study, where participants ideated on a variety of different types of informative content, displayed on or around vehicles. Our study approach utilized handheld projection to create visualization experiences suggestive of the capabilities of future display technologies. The salient findings show that ideas related to the car and the driving function, such as parking, warning pedestrians and changing the vehicle’s aesthetic appearance, were appreciated. In contrast, ideas where the vehicle formed part of a smart urban infrastructure, such as guiding pedestrians or acting as a public display, caused diverse opinions. In particular, concepts where personalized content was shown were disliked for reasons related to privacy and feeling like ‘big brother’ is watching.
@InProceedings{colley2018perdis,
author = {Colley, Ashley and H\"{a}kkil\"{a}, Jonna and Forsman, Meri-Tuulia and Pfleging, Bastian and Alt, Florian},
booktitle = {{Proceedings of the 2018 ACM International Symposium on Pervasive Displays}},
title = {{Car Exterior Surface Displays: Exploration in a Real-World Context}},
year = {2018},
address = {New York, NY, USA},
note = {colley2018perdis},
pages = {7:1--7:8},
publisher = {Association for Computing Machinery},
series = {PerDis '18},
abstract = {Current changes in the automotive industry towards autonomous vehicles will spur wide-ranging changes in the roles of cars in urban environments. When combined with advances in display technology, this creates potential for the outer surfaces of cars to act as public displays. We present a real-world, in-context study, where participants ideated on a variety of different types of informative content, displayed on or around vehicles. Our study approach utilized handheld projection to create visualization experiences suggestive of the capabilities of future display technologies. The salient findings show that ideas related to the car and the driving function, such as parking, warning pedestrians, and changing the vehicle's aesthetic appearance, were appreciated. In contrast, ideas where the vehicle formed part of a smart urban infrastructure, such as guiding pedestrians or acting as a public display, caused diverse opinions. In particular, concepts where personalized content was shown were disliked for reasons related to privacy and feeling like 'big brother' is watching.},
acmid = {3205880},
articleno = {7},
doi = {10.1145/3205873.3205880},
isbn = {978-1-4503-5765-4},
keywords = {Automotive UI, interactive surfaces, pedestrian guidance, pervasive navigation, projected AR, public displays, spatial augmented reality},
location = {Munich, Germany},
numpages = {8},
timestamp = {2018.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/colley2018perdis.pdf},
}
F. Alt, S. Geiger, and W. Höhl. ShapelineGuide: Teaching Mid-Air Gestures for Large Interactive Displays. In Proceedings of the 2018 ACM International Symposium on Pervasive Displays (PerDis ’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 3:1–3:8. doi:10.1145/3205873.3205887
[BibTeX] [Abstract] [PDF]
We present ShapelineGuide, a dynamic visual guide that supports users of large interactive displays while performing mid-air gestures. Today, we find many examples of large displays supporting interaction through gestures performed in mid-air. Yet, approaches that support users in learning and executing these gestures are still scarce. Prior approaches require complex setups, are targeted towards the use of 2D gestures, or focus on the initial gestures only. Our work extends the state of the art by presenting a feedforward system that provides users constant updates on their gestures. We report on the design and implementation of the approach and present findings from an evaluation of the system in a lab study (N=44), focusing on learning performance, accuracy, and errors. We found that ShapelineGuide helps users learn the gestures and decreases execution times and cognitive load.
@InProceedings{alt2018perdis,
author = {Alt, Florian and Geiger, Sabrina and H\"{o}hl, Wolfgang},
booktitle = {{Proceedings of the 2018 ACM International Symposium on Pervasive Displays}},
title = {{ShapelineGuide: Teaching Mid-Air Gestures for Large Interactive Displays}},
year = {2018},
address = {New York, NY, USA},
note = {alt2018perdis},
pages = {3:1--3:8},
publisher = {Association for Computing Machinery},
series = {PerDis '18},
abstract = {We present ShapelineGuide, a dynamic visual guide that supports users of large interactive displays while performing mid-air gestures. Today, we find many examples of large displays supporting interaction through gestures performed in mid-air. Yet, approaches that support users in learning and executing these gestures are still scarce. Prior approaches require complex setups, are targeted towards the use of 2D gestures, or focus on the initial gestures only. Our work extends the state of the art by presenting a feedforward system that provides users constant updates on their gestures. We report on the design and implementation of the approach and present findings from an evaluation of the system in a lab study (N=44), focusing on learning performance, accuracy, and errors. We found that ShapelineGuide helps users learn the gestures and decreases execution times and cognitive load.},
acmid = {3205887},
articleno = {3},
doi = {10.1145/3205873.3205887},
isbn = {978-1-4503-5765-4},
keywords = {Displays, Dynamic Guides, Feedback, Feedforward, Gestures},
location = {Munich, Germany},
numpages = {8},
timestamp = {2018.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2018perdis.pdf},
}
M. Khamis, C. Oechsner, F. Alt, and A. Bulling. VRpursuits: Interaction in Virtual Reality Using Smooth Pursuit Eye Movements. In Proceedings of the 2018 International Conference on Advanced Visual Interfaces (AVI ’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 18:1–18:8. doi:10.1145/3206505.3206522
[BibTeX] [Abstract] [PDF]
Gaze-based interaction using smooth pursuit eye movements (Pursuits) is attractive given that it is intuitive and overcomes the Midas touch problem. At the same time, eye tracking is becoming increasingly popular for VR applications. While Pursuits was shown to be effective in several interaction contexts, it was never explored in-depth for VR before. In a user study (N=26), we investigated how parameters that are specific to VR settings influence the performance of Pursuits. For example, we found that Pursuits is robust against different sizes of virtual 3D targets. However, performance improves when the trajectory size (e.g., radius) is larger, particularly if the user is walking while interacting. While walking, selecting moving targets via Pursuits is generally feasible albeit less accurate than when stationary. Finally, we discuss the implications of these findings and the potential of smooth pursuits for interaction in VR by demonstrating two sample use cases: 1) gaze-based authentication in VR, and 2) a space meteors shooting game.
@InProceedings{khamis2018avi,
author = {Khamis, Mohamed and Oechsner, Carl and Alt, Florian and Bulling, Andreas},
booktitle = {{Proceedings of the 2018 International Conference on Advanced Visual Interfaces}},
title = {{VRpursuits: Interaction in Virtual Reality Using Smooth Pursuit Eye Movements}},
year = {2018},
address = {New York, NY, USA},
note = {khamis2018avi},
pages = {18:1--18:8},
publisher = {Association for Computing Machinery},
series = {AVI '18},
abstract = {Gaze-based interaction using smooth pursuit eye movements (Pursuits) is attractive given that it is intuitive and overcomes the Midas touch problem. At the same time, eye tracking is becoming increasingly popular for VR applications. While Pursuits was shown to be effective in several interaction contexts, it was never explored in-depth for VR before. In a user study (N=26), we investigated how parameters that are specific to VR settings influence the performance of Pursuits. For example, we found that Pursuits is robust against different sizes of virtual 3D targets. However, performance improves when the trajectory size (e.g., radius) is larger, particularly if the user is walking while interacting. While walking, selecting moving targets via Pursuits is generally feasible albeit less accurate than when stationary. Finally, we discuss the implications of these findings and the potential of smooth pursuits for interaction in VR by demonstrating two sample use cases: 1) gaze-based authentication in VR, and 2) a space meteors shooting game.},
acmid = {3206522},
articleno = {18},
doi = {10.1145/3206505.3206522},
isbn = {978-1-4503-5616-9},
keywords = {eye tracking, gaze interaction, pursuits, virtual reality},
location = {Castiglione della Pescaia, Grosseto, Italy},
numpages = {8},
timestamp = {2018.05.31},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2018avi.pdf},
}
T. Kosch, M. Hassib, P. W. Woźniak, D. Buschek, and F. Alt. Your Eyes Tell: Leveraging Smooth Pursuit for Assessing Cognitive Workload. In Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems (CHI ’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 436:1–436:13. doi:10.1145/3173574.3174010
[BibTeX] [Abstract] [PDF]
A common objective for context-aware computing systems is to predict how user interfaces impact user performance regarding their cognitive capabilities. Existing approaches such as questionnaires or pupil dilation measurements either only allow for subjective assessments or are susceptible to environmental influences and user physiology. We address these challenges by exploiting the fact that cognitive workload influences smooth pursuit eye movements. We compared three trajectories and two speeds under different levels of cognitive workload within a user study (N=20). We found higher deviations of gaze points during smooth pursuit eye movements for specific trajectory types at higher cognitive workload levels. Using an SVM classifier, we predict cognitive workload through smooth pursuit with an accuracy of 99.5% for distinguishing between low and high workload as well as an accuracy of 88.1% for estimating workload between three levels of difficulty. We discuss implications and present use cases of how cognition-aware systems benefit from inferring cognitive workload in real-time by smooth pursuit eye movements.
@InProceedings{kosch2018chi,
author = {Kosch, Thomas and Hassib, Mariam and Wo\'{z}niak, Pawe\l W. and Buschek, Daniel and Alt, Florian},
booktitle = {{Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems}},
title = {{Your Eyes Tell: Leveraging Smooth Pursuit for Assessing Cognitive Workload}},
year = {2018},
address = {New York, NY, USA},
note = {kosch2018chi},
pages = {436:1--436:13},
publisher = {Association for Computing Machinery},
series = {CHI '18},
abstract = {A common objective for context-aware computing systems is to predict how user interfaces impact user performance regarding their cognitive capabilities. Existing approaches such as questionnaires or pupil dilation measurements either only allow for subjective assessments or are susceptible to environmental influences and user physiology. We address these challenges by exploiting the fact that cognitive workload influences smooth pursuit eye movements. We compared three trajectories and two speeds under different levels of cognitive workload within a user study (N=20). We found higher deviations of gaze points during smooth pursuit eye movements for specific trajectory types at higher cognitive workload levels. Using an SVM classifier, we predict cognitive workload through smooth pursuit with an accuracy of 99.5% for distinguishing between low and high workload as well as an accuracy of 88.1% for estimating workload between three levels of difficulty. We discuss implications and present use cases of how cognition-aware systems benefit from inferring cognitive workload in real-time by smooth pursuit eye movements.},
acmid = {3174010},
articleno = {436},
doi = {10.1145/3173574.3174010},
isbn = {978-1-4503-5620-6},
keywords = {cognition-aware user interfaces, cognitive workload, eye tracking, mental workload, smooth pursuit, workload-aware computing},
location = {Montreal QC, Canada},
numpages = {13},
timestamp = {2018.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/kosch2018chi.pdf},
}
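The paper's classification step pairs deviation features of smooth-pursuit gaze with an SVM. Below is a minimal sketch of such a pipeline in Python, assuming synthetic placeholder features (e.g., mean and spread of gaze-to-target distance per trial) in place of real gaze recordings; feature choice and parameters are illustrative assumptions, not the study's actual setup.

import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

rng = np.random.default_rng(0)
# Placeholder per-trial features; in the study these would be deviation
# statistics of gaze points relative to the pursuit stimulus trajectory.
X_low = rng.normal(0.0, 1.0, size=(60, 4))   # low-workload trials
X_high = rng.normal(0.8, 1.2, size=(60, 4))  # high-workload trials
X = np.vstack([X_low, X_high])
y = np.array([0] * 60 + [1] * 60)            # 0 = low, 1 = high workload

clf = make_pipeline(StandardScaler(), SVC(kernel="rbf"))
print("cv accuracy: %.2f" % cross_val_score(clf, X, y, cv=5).mean())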
M. Khamis, A. Baier, N. Henze, F. Alt, and A. Bulling. Understanding Face and Eye Visibility in Front-Facing Cameras of Smartphones Used in the Wild. In Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems (CHI ’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 280:1–280:12. doi:10.1145/3173574.3173854
[BibTeX] [Abstract] [PDF]
Commodity mobile devices are now equipped with high-resolution front-facing cameras, allowing applications in biometrics (e.g., FaceID in the iPhone X), facial expression analysis, or gaze interaction. However, it is unknown how often users hold devices in a way that allows capturing their face or eyes, and how this impacts detection accuracy. We collected 25,726 in-the-wild photos, taken from the front-facing camera of smartphones, as well as associated application usage logs. We found that the full face is visible about 29% of the time, and that in most cases the face is only partially visible. Furthermore, we identified an influence of users’ current activity; for example, when watching videos, the eyes but not the entire face are visible 75% of the time in our dataset. We found that a state-of-the-art face detection algorithm performs poorly against photos taken from front-facing cameras. We discuss how these findings impact mobile applications that leverage face and eye detection, and derive practical implications to address the state of the art’s limitations.
@InProceedings{khamis2018chi2,
author = {Khamis, Mohamed and Baier, Anita and Henze, Niels and Alt, Florian and Bulling, Andreas},
booktitle = {{Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems}},
title = {{Understanding Face and Eye Visibility in Front-Facing Cameras of Smartphones Used in the Wild}},
year = {2018},
address = {New York, NY, USA},
note = {khamis2018chi2},
pages = {280:1--280:12},
publisher = {Association for Computing Machinery},
series = {CHI '18},
abstract = {Commodity mobile devices are now equipped with high-resolution front-facing cameras, allowing applications in biometrics (e.g., FaceID in the iPhone X), facial expression analysis, or gaze interaction. However, it is unknown how often users hold devices in a way that allows capturing their face or eyes, and how this impacts detection accuracy. We collected 25,726 in-the-wild photos, taken from the front-facing camera of smartphones, as well as associated application usage logs. We found that the full face is visible about 29% of the time, and that in most cases the face is only partially visible. Furthermore, we identified an influence of users' current activity; for example, when watching videos, the eyes but not the entire face are visible 75% of the time in our dataset. We found that a state-of-the-art face detection algorithm performs poorly against photos taken from front-facing cameras. We discuss how these findings impact mobile applications that leverage face and eye detection, and derive practical implications to address the state of the art's limitations.},
acmid = {3173854},
articleno = {280},
doi = {10.1145/3173574.3173854},
isbn = {978-1-4503-5620-6},
keywords = {eye tracking, face detection, front-facing camera, gaze estimation, in the wild study, mobile device},
location = {Montreal QC, Canada},
numpages = {12},
timestamp = {2018.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2018chi2.pdf},
}
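To get a feel for the kind of measurement reported here, one can run a stock detector over a folder of front-facing-camera photos and tally face visibility. The sketch below uses OpenCV's bundled Haar cascade purely as a stand-in (the paper evaluated a state-of-the-art detector, which this is not), and the photos/ folder is a hypothetical path.

import glob
import cv2

# OpenCV's bundled frontal-face Haar cascade as an illustrative detector.
cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

visible = total = 0
for path in glob.glob("photos/*.jpg"):  # hypothetical photo folder
    img = cv2.imread(path)
    if img is None:
        continue
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    total += 1
    visible += int(len(faces) > 0)

if total:
    print("face detected in %d/%d photos (%.0f%%)"
          % (visible, total, 100 * visible / total))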
D. Weber, A. Voit, G. Kollotzek, L. van der Vekens, M. Hepting, F. Alt, and N. Henze. PD Notify: Investigating Personal Content on Public Displays. In Extended Abstracts of the 2018 CHI Conference on Human Factors in Computing Systems (CHI EA ’18), Association for Computing Machinery, New York, NY, USA, 2018, p. LBW014:1–LBW014:6. doi:10.1145/3170427.3188475
[BibTeX] [Abstract] [PDF]
Public displays are becoming more and more ubiquitous. Current public displays are mainly used as general information displays or to display advertisements. How personal content should be shown is still an important research topic. In this paper, we present PD Notify, a system that mirrors a user’s pending smartphone notifications on nearby public displays. Notifications are an essential part of current smartphones and inform users about various events, such as new messages, pending updates, personalized news, and upcoming appointments. PD Notify implements privacy settings to control what is shown on the public displays. We conducted an in-situ study in a semi-public work environment for three weeks with seven participants. The results of this first deployment show that displaying personal content on public displays is not only feasible but also valued by users. Participants quickly settled for privacy settings that work for all kinds of content. While they liked the system, they did not want to spend time configuring it.
@InProceedings{weber2018chiea,
author = {Weber, Dominik and Voit, Alexandra and Kollotzek, Gisela and van der Vekens, Lucas and Hepting, Marcus and Alt, Florian and Henze, Niels},
booktitle = {{Extended Abstracts of the 2018 CHI Conference on Human Factors in Computing Systems}},
title = {{PD Notify: Investigating Personal Content on Public Displays}},
year = {2018},
address = {New York, NY, USA},
note = {weber2018chiea},
pages = {LBW014:1--LBW014:6},
publisher = {Association for Computing Machinery},
series = {CHI EA '18},
abstract = {Public displays are becoming more and more ubiquitous. Current public displays are mainly used as general information displays or to display advertisements. How personal content should be shown is still an important research topic. In this paper, we present PD Notify, a system that mirrors a user's pending smartphone notifications on nearby public displays. Notifications are an essential part of current smartphones and inform users about various events, such as new messages, pending updates, personalized news, and upcoming appointments. PD Notify implements privacy settings to control what is shown on the public displays. We conducted an in-situ study in a semi-public work environment for three weeks with seven participants. The results of this first deployment show that displaying personal content on public displays is not only feasible but also valued by users. Participants quickly settled for privacy settings that work for all kinds of content. While they liked the system, they did not want to spend time configuring it.},
acmid = {3188475},
articleno = {LBW014},
doi = {10.1145/3170427.3188475},
isbn = {978-1-4503-5621-3},
keywords = {notifications, pervasive, privacy, public displays},
location = {Montreal QC, Canada},
numpages = {6},
timestamp = {2018.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/weber2018chiea.pdf},
}
M. Khamis, D. Buschek, T. Thieron, F. Alt, and A. Bulling. EyePACT: Eye-Based Parallax Correction on Touch-Enabled Interactive Displays. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies, vol. 1, iss. 4, p. 146:1–146:18, 2018. doi:10.1145/3161168
[BibTeX] [Abstract] [PDF]
The parallax effect describes the displacement between the perceived and detected touch locations on a touch-enabled surface. Parallax is a key usability challenge for interactive displays, particularly for those that require thick layers of glass between the screen and the touch surface to protect them from vandalism. To address this challenge, we present EyePACT, a method that compensates for input error caused by parallax on public displays. Our method uses a display-mounted depth camera to detect the user’s 3D eye position in front of the display and the detected touch location to predict the perceived touch location on the surface. We evaluate our method in two user studies in terms of parallax correction performance as well as multi-user support. Our evaluations demonstrate that EyePACT (1) significantly improves accuracy even with varying gap distances between the touch surface and the display, (2) adapts to different levels of parallax by resulting in significantly larger corrections with larger gap distances, and (3) maintains a significantly large distance between two users’ fingers when interacting with the same object. These findings are promising for the development of future parallax-free interactive displays.
@Article{khamis2018imwut1,
author = {Khamis, Mohamed and Buschek, Daniel and Thieron, Tobias and Alt, Florian and Bulling, Andreas},
journal = {{Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies}},
title = {{EyePACT: Eye-Based Parallax Correction on Touch-Enabled Interactive Displays}},
year = {2018},
issn = {2474-9567},
month = jan,
note = {khamis2018imwut},
number = {4},
pages = {146:1--146:18},
volume = {1},
abstract = {The parallax effect describes the displacement between the perceived and detected touch locations on a touch-enabled surface. Parallax is a key usability challenge for interactive displays, particularly for those that require thick layers of glass between the screen and the touch surface to protect them from vandalism. To address this challenge, we present EyePACT, a method that compensates for input error caused by parallax on public displays. Our method uses a display-mounted depth camera to detect the user's 3D eye position in front of the display and the detected touch location to predict the perceived touch location on the surface. We evaluate our method in two user studies in terms of parallax correction performance as well as multi-user support. Our evaluations demonstrate that EyePACT (1) significantly improves accuracy even with varying gap distances between the touch surface and the display, (2) adapts to different levels of parallax by resulting in significantly larger corrections with larger gap distances, and (3) maintains a significantly large distance between two users' fingers when interacting with the same object. These findings are promising for the development of future parallax-free interactive displays.},
acmid = {3161168},
address = {New York, NY, USA},
articleno = {146},
doi = {10.1145/3161168},
issue_date = {December 2017},
keywords = {Gaze, Parallax, Public Displays, Touch screens},
numpages = {18},
publisher = {Association for Computing Machinery},
timestamp = {2018.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2018imwut.pdf},
}
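The geometric core of this kind of parallax correction can be stated compactly: given the user's 3-D eye position and the detected touch on the protective surface, the perceived location is where the eye-to-touch ray meets the display plane. A minimal sketch under assumed coordinates (display plane at z = 0, touch surface at z = gap; names and frame are illustrative, not the paper's actual implementation):

import numpy as np

def correct_parallax(eye, touch, gap):
    # eye: (x, y, z) eye position in display coordinates, with z > gap.
    # touch: (x, y) detected touch on the glass at height z = gap.
    # Returns the perceived touch location on the display plane z = 0.
    eye = np.asarray(eye, float)
    t = np.array([touch[0], touch[1], gap], float)
    d = t - eye                 # ray from the eye through the touch point
    s = -eye[2] / d[2]          # parameter at which the ray reaches z = 0
    return (eye + s * d)[:2]

# Eye 60 cm in front and 10 cm to the right of the display, 6 mm glass gap:
print(correct_parallax((100, 0, 600), (50, 50), 6))

Note how the corrected point shifts away from the detected touch in the direction opposite the eye offset, and how a larger gap yields a larger correction, matching the paper's second finding.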
D. Buschek, B. Bisinger, and F. Alt. ResearchIME: A Mobile Keyboard Application for Studying Free Typing Behaviour in the Wild. In Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems (CHI ’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 255:1–255:14. doi:10.1145/3173574.3173829
[BibTeX] [Abstract] [PDF]
We present a data logging concept, tool, and analyses to facilitate studies of everyday mobile touch keyboard use and free typing behaviour: 1) We propose a filtering concept to log typing without recording readable text and assess reactions to filters with a survey (N=349). 2) We release an Android keyboard app and backend that implement this concept. 3) Based on a three-week field study (N=30), we present the first analyses of keyboard use and typing biometrics on such free text typing data in the wild, including speed, postures, apps, autocorrection, and word suggestions. We conclude that research on mobile keyboards benefits from observing free typing beyond the lab and discuss ideas for further studies.
@InProceedings{buschek2018chi2,
author = {Buschek, Daniel and Bisinger, Benjamin and Alt, Florian},
booktitle = {{Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems}},
title = {{ResearchIME: A Mobile Keyboard Application for Studying Free Typing Behaviour in the Wild}},
year = {2018},
address = {New York, NY, USA},
note = {buschek2018chi2},
pages = {255:1--255:14},
publisher = {Association for Computing Machinery},
series = {CHI '18},
abstract = {We present a data logging concept, tool, and analyses to facilitate studies of everyday mobile touch keyboard use and free typing behaviour: 1) We propose a filtering concept to log typing without recording readable text and assess reactions to filters with a survey (N=349). 2) We release an Android keyboard app and backend that implement this concept. 3) Based on a three-week field study (N=30), we present the first analyses of keyboard use and typing biometrics on such free text typing data in the wild, including speed, postures, apps, autocorrection, and word suggestions. We conclude that research on mobile keyboards benefits from observing free typing beyond the lab and discuss ideas for further studies.},
acmid = {3173829},
articleno = {255},
comment = {buschek2018chi2},
doi = {10.1145/3173574.3173829},
isbn = {978-1-4503-5620-6},
keywords = {biometrics, data logging, touch keyboard, typing behaviour},
location = {Montreal QC, Canada},
numpages = {14},
timestamp = {2018.05.01},
url = {http://florian-alt.org/unibw/wp-content/publications/buschek2018chi2.pdf},
}
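The filtering idea here, logging typing dynamics without storing readable text, can be illustrated by reducing each keystroke to a timestamped category before it reaches the log. The categories and names below are illustrative assumptions, not ResearchIME's actual filter set:

import time

def filter_key(ch):
    # Collapse a typed character to a coarse, non-readable category so
    # that timing and behaviour are preserved but text content is not.
    if ch.isalpha():
        return "LETTER"
    if ch.isdigit():
        return "DIGIT"
    if ch.isspace():
        return "SPACE"
    return "SYMBOL"

# A log entry keeps when a key was pressed and its category, never the key.
log = [(time.time(), filter_key(ch)) for ch in "Meet at 5pm!"]
print(log[:4])  # e.g. [(..., 'LETTER'), (..., 'LETTER'), ...]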
V. Mäkelä, M. Khamis, L. Mecke, J. James, M. Turunen, and F. Alt. Pocket Transfers: Interaction Techniques for Transferring Content from Situated Displays to Mobile Devices. In Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems (CHI ’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 135:1–135:13. doi:10.1145/3173574.3173709
[BibTeX] [Abstract] [PDF]
We present Pocket Transfers: interaction techniques that allow users to transfer content from situated displays to a personal mobile device while keeping the device in a pocket or bag. Existing content transfer solutions require direct manipulation of the mobile device, making interaction slower and less flexible. Our introduced techniques employ touch, mid-air gestures, gaze, and a multimodal combination of gaze and mid-air gestures. We evaluated the techniques in a novel user study (N=20), where we considered dynamic scenarios where the user approaches the display, completes the task, and leaves. We show that all pocket transfer techniques are fast and seen as highly convenient. Mid-air gestures are the most efficient touchless method for transferring a single item, while the multimodal method is the fastest touchless method when multiple items are transferred. We provide guidelines to help researchers and practitioners choose the most suitable content transfer techniques for their systems.
@InProceedings{makela2018chi,
author = {M\"{a}kel\"{a}, Ville and Khamis, Mohamed and Mecke, Lukas and James, Jobin and Turunen, Markku and Alt, Florian},
booktitle = {{Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems}},
title = {{Pocket Transfers: Interaction Techniques for Transferring Content from Situated Displays to Mobile Devices}},
year = {2018},
address = {New York, NY, USA},
note = {makela2018chi},
pages = {135:1--135:13},
publisher = {Association for Computing Machinery},
series = {CHI '18},
abstract = {We present Pocket Transfers: interaction techniques that allow users to transfer content from situated displays to a personal mobile device while keeping the device in a pocket or bag. Existing content transfer solutions require direct manipulation of the mobile device, making interaction slower and less flexible. Our introduced techniques employ touch, mid-air gestures, gaze, and a multimodal combination of gaze and mid-air gestures. We evaluated the techniques in a novel user study (N=20), where we considered dynamic scenarios where the user approaches the display, completes the task, and leaves. We show that all pocket transfer techniques are fast and seen as highly convenient. Mid-air gestures are the most efficient touchless method for transferring a single item, while the multimodal method is the fastest touchless method when multiple items are transferred. We provide guidelines to help researchers and practitioners choose the most suitable content transfer techniques for their systems.},
acmid = {3173709},
articleno = {135},
doi = {10.1145/3173574.3173709},
isbn = {978-1-4503-5620-6},
keywords = {content transfer, cross-device interaction, gaze, mid-air gestures, multimodal, public displays, ubiquitous computing},
location = {Montreal QC, Canada},
numpages = {13},
timestamp = {2018.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/makela2018chi.pdf},
}
D. Buschek, B. Roppelt, and F. Alt. Extending Keyboard Shortcuts with Arm and Wrist Rotation Gestures. In Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems (CHI’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 1–12. doi:10.1145/3173574.3173595
[BibTeX] [Abstract] [PDF]
We propose and evaluate a novel interaction technique to enhance physical keyboard shortcuts with arm and wrist rotation gestures, performed during keypresses: rolling the wrist, rotating the arm/wrist, and lifting it. This extends the set of shortcuts from key combinations (e.g. ctrl + v) to combinations of key(s) and gesture (e.g. v + roll left) and enables continuous control. We implement this approach for isolated single keypresses, using inertial sensors of a smartwatch. We investigate key aspects in three studies: 1) rotation flexibility per keystroke finger, 2) rotation control, and 3) user-defined gesture shortcuts. As a use case, we employ our technique in a painting application and assess user experience. Overall, results show that arm and wrist rotations during keystrokes can be used for interaction, yet challenges remain for integration into practical applications. We discuss recommendations for applications and ideas for future research.
@InProceedings{buschek2018chi1,
author = {Buschek, Daniel and Roppelt, Bianka and Alt, Florian},
booktitle = {{Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems}},
title = {{Extending Keyboard Shortcuts with Arm and Wrist Rotation Gestures}},
year = {2018},
address = {New York, NY, USA},
note = {buschek2018chi1},
pages = {1--12},
publisher = {Association for Computing Machinery},
series = {CHI'18},
abstract = {We propose and evaluate a novel interaction technique to enhance physical keyboard shortcuts with arm and wrist rotation gestures, performed during keypresses: rolling the wrist, rotating the arm/wrist, and lifting it. This extends the set of shortcuts from key combinations (e.g. ctrl + v) to combinations of key(s) and gesture (e.g. v + roll left) and enables continuous control. We implement this approach for isolated single keypresses, using inertial sensors of a smartwatch. We investigate key aspects in three studies: 1) rotation flexibility per keystroke finger, 2) rotation control, and 3) user-defined gesture shortcuts. As a use case, we employ our technique in a painting application and assess user experience. Overall, results show that arm and wrist rotations during keystrokes can be used for interaction, yet challenges remain for integration into practical applications. We discuss recommendations for applications and ideas for future research.},
doi = {10.1145/3173574.3173595},
isbn = {9781450356206},
numpages = {12},
timestamp = {2018.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2018chi1.pdf},
}
M. Khamis, C. Becker, A. Bulling, and F. Alt. Which One is Me?: Identifying Oneself on Public Displays. In Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems (CHI ’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 287:1–287:12. doi:10.1145/3173574.3173861
[BibTeX] [Abstract] [PDF]
While user representations are extensively used on public displays, it remains unclear how well users can recognize their own representation among those of surrounding users. We study the most widely used representations: abstract objects, skeletons, silhouettes and mirrors. In a prestudy (N=12), we identify five strategies that users follow to recognize themselves on public displays. In a second study (N=19), we quantify the users’ recognition time and accuracy with respect to each representation type. Our findings suggest that there is a significant effect of (1) the representation type, (2) the strategies performed by users, and (3) the combination of both on recognition time and accuracy. We discuss the suitability of each representation for different settings and provide specific recommendations as to how user representations should be applied in multi-user scenarios. These recommendations guide practitioners and researchers in selecting the representation that optimizes the most for the deployment’s requirements, and for the user strategies that are feasible in that environment.
@InProceedings{khamis2018chi1,
author = {Khamis, Mohamed and Becker, Christian and Bulling, Andreas and Alt, Florian},
booktitle = {{Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems}},
title = {{Which One is Me?: Identifying Oneself on Public Displays}},
year = {2018},
address = {New York, NY, USA},
note = {khamis2018chi1},
pages = {287:1--287:12},
publisher = {Association for Computing Machinery},
series = {CHI '18},
abstract = {While user representations are extensively used on public displays, it remains unclear how well users can recognize their own representation among those of surrounding users. We study the most widely used representations: abstract objects, skeletons, silhouettes and mirrors. In a prestudy (N=12), we identify five strategies that users follow to recognize themselves on public displays. In a second study (N=19), we quantify the users' recognition time and accuracy with respect to each representation type. Our findings suggest that there is a significant effect of (1) the representation type, (2) the strategies performed by users, and (3) the combination of both on recognition time and accuracy. We discuss the suitability of each representation for different settings and provide specific recommendations as to how user representations should be applied in multi-user scenarios. These recommendations guide practitioners and researchers in selecting the representation that optimizes the most for the deployment's requirements, and for the user strategies that are feasible in that environment.},
acmid = {3173861},
articleno = {287},
doi = {10.1145/3173574.3173861},
isbn = {978-1-4503-5620-6},
keywords = {multiple users, public displays, user representations},
location = {Montreal QC, Canada},
numpages = {12},
timestamp = {2018.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2018chi1.pdf},
}
M. Hassib, S. Schneegass, N. Henze, A. Schmidt, and F. Alt. A Design Space for Audience Sensing and Feedback Systems. In Extended Abstracts of the 2018 CHI Conference on Human Factors in Computing Systems (CHI EA ’18), Association for Computing Machinery, New York, NY, USA, 2018, p. LBW085:1–LBW085:6. doi:10.1145/3170427.3188569
[BibTeX] [Abstract] [PDF]
Audience feedback is a valuable asset in many domains such as arts, education, and marketing. Artists can receive feedback on the experiences created through their performances. Similarly, teachers can receive feedback from students on the understandability of their course content. There are various methods to collect explicit feedback (e.g., questionnaires) – yet they usually impose a burden on the audience. Advances in physiological sensing open up opportunities for collecting feedback implicitly. This creates unexplored dimensions in the design space of audience sensing. In this work, we chart a comprehensive design space for audience sensing based on a literature and market review, which aims to support the designers’ process for creating novel feedback systems.
@InProceedings{hassib2018chiea,
author = {Hassib, Mariam and Schneegass, Stefan and Henze, Niels and Schmidt, Albrecht and Alt, Florian},
booktitle = {{Extended Abstracts of the 2018 CHI Conference on Human Factors in Computing Systems}},
title = {{A Design Space for Audience Sensing and Feedback Systems}},
year = {2018},
address = {New York, NY, USA},
note = {hassib2018chiea},
pages = {LBW085:1--LBW085:6},
publisher = {Association for Computing Machinery},
series = {CHI EA '18},
abstract = {Audience feedback is a valuable asset in many domains such as arts, education, and marketing. Artists can receive feedback on the experiences created through their performances. Similarly, teachers can receive feedback from students on the understandability of their course content. There are various methods to collect explicit feedback (e.g., questionnaires) - yet they usually impose a burden on the audience. Advances in physiological sensing open up opportunities for collecting feedback implicitly. This creates unexplored dimensions in the design space of audience sensing. In this work, we chart a comprehensive design space for audience sensing based on a literature and market review, which aims to support the designers' process for creating novel feedback systems.},
acmid = {3188569},
articleno = {LBW085},
doi = {10.1145/3170427.3188569},
isbn = {978-1-4503-5621-3},
keywords = {affective computing, audience sensing},
location = {Montreal QC, Canada},
numpages = {6},
timestamp = {2018.04.30},
url = {http://www.florian-alt.org/unibw/wp-content/publications/hassib2018chiea.pdf},
}
L. Mecke, S. Prange, D. Buschek, and F. Alt. A Design Space for Security Indicators for Behavioural Biometrics on Mobile Touchscreen Devices. In Extended Abstracts of the 2018 CHI Conference on Human Factors in Computing Systems (CHI EA ’18), Association for Computing Machinery, New York, NY, USA, 2018, p. LBW003:1–LBW003:6. doi:10.1145/3170427.3188633
[BibTeX] [Abstract] [PDF]
We propose a design space for security indicators for behavioural biometrics on mobile touchscreen devices. Design dimensions are derived from a focus group with experts and a literature review. The space supports the design of indicators which aim to facilitate users’ decision making, awareness and understanding, as well as increase transparency of behavioural biometrics systems. We conclude with a set of example designs and discuss further extensions, future research questions and study ideas.
@InProceedings{mecke2018chiea,
author = {Mecke, Lukas and Prange, Sarah and Buschek, Daniel and Alt, Florian},
booktitle = {{Extended Abstracts of the 2018 CHI Conference on Human Factors in Computing Systems}},
title = {{A Design Space for Security Indicators for Behavioural Biometrics on Mobile Touchscreen Devices}},
year = {2018},
address = {New York, NY, USA},
note = {mecke2018chiea},
pages = {LBW003:1--LBW003:6},
publisher = {Association for Computing Machinery},
series = {CHI EA '18},
abstract = {We propose a design space for security indicators for behavioural biometrics on mobile touchscreen devices. Design dimensions are derived from a focus group with experts and a literature review. The space supports the design of indicators which aim to facilitate users' decision making, awareness and understanding, as well as increase transparency of behavioural biometrics systems. We conclude with a set of example designs and discuss further extensions, future research questions and study ideas.},
acmid = {3188633},
articleno = {LBW003},
doi = {10.1145/3170427.3188633},
isbn = {978-1-4503-5621-3},
keywords = {behavioural biometrics, design space, focus group, mobile touchscreen devices, security indicator},
location = {Montreal QC, Canada},
numpages = {6},
timestamp = {2018.04.30},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mecke2018chiea.pdf},
}
L. Trotter, S. Prange, M. Khamis, N. Davies, and F. Alt. Design Considerations for Secure and Usable Authentication on Situated Displays. In Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia (MUM 2018), Association for Computing Machinery, New York, NY, USA, 2018, p. 483–490. doi:10.1145/3282894.3289743
[BibTeX] [Abstract] [PDF]
Users often need to authenticate at situated displays in order to, for example, make purchases, access sensitive information, or confirm an identity. However, the exposure of interactions in public spaces introduces a large attack surface (e.g., observation, smudge or thermal attacks). A plethora of authentication models and input modalities that aim at disguising users’ input has been presented in the past. However, a comprehensive analysis of the requirements for secure and usable authentication on public displays is still missing. This work presents 13 design considerations suitable to inform practitioners and researchers during the development process of authentication systems for situated displays in public spaces. It draws on a comprehensive analysis of prior literature and subsequent discussion with five experts in the fields of pervasive displays, human-computer interaction and usable security.
@InProceedings{trotter2018mumadj,
author = {Trotter, Ludwig and Prange, Sarah and Khamis, Mohamed and Davies, Nigel and Alt, Florian},
booktitle = {{Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{Design Considerations for Secure and Usable Authentication on Situated Displays}},
year = {2018},
address = {New York, NY, USA},
note = {trotter2018mumadj},
pages = {483--490},
publisher = {Association for Computing Machinery},
series = {MUM 2018},
abstract = {Users often need to authenticate at situated displays in order to, for example, make purchases, access sensitive information, or confirm an identity. However, the exposure of interactions in public spaces introduces a large attack surface (e.g., observation, smudge or thermal attacks). A plethora of authentication models and input modalities that aim at disguising users' input has been presented in the past. However, a comprehensive analysis of the requirements for secure and usable authentication on public displays is still missing. This work presents 13 design considerations suitable to inform practitioners and researchers during the development process of authentication systems for situated displays in public spaces. It draws on a comprehensive analysis of prior literature and subsequent discussion with five experts in the fields of pervasive displays, human-computer interaction and usable security.},
acmid = {3289743},
doi = {10.1145/3282894.3289743},
isbn = {978-1-4503-6594-9},
keywords = {Authentication, Design Considerations, Input Modalities, Public Displays, User Interface Design},
location = {Cairo, Egypt},
numpages = {8},
timestamp = {2018.11.28},
url = {http://www.florian-alt.org/unibw/wp-content/publications/trotter2018mumadj.pdf},
}

2017

S. Prange, V. Müller, D. Buschek, and F. Alt. QuakeQuiz – A Case Study on Deploying a Playful Display Application in a Museum Context. In Proceedings of the 16th International Conference on Mobile and Ubiquitous Multimedia (MUM ’17), Association for Computing Machinery, New York, NY, USA, 2017. doi:10.1145/3152832.3152841
[BibTeX] [Abstract] [PDF]
In this paper, we present a case study in which we designed and implemented an interactive museum exhibit. In particular, we extended a section of the museum with an interactive quiz game. The project is an example of an opportunistic deployment where the needs of different stakeholders (museum administration, visitors, researchers) and the properties of the space needed to be considered. It is also an example of how we can apply knowledge on methodology and audience behavior collected over the past years by the research community. At the focus of this paper is (1) the design and concept phase that led to the initial idea for the exhibit, (2) the implementation phase, (3) a roll-out and early insights phase where we tested and refined the application in an iterative design process on-site, and (4) the final deployment as a permanent exhibit of the museum. We hope our report to be useful for researchers and practitioners designing systems for similar contexts.
@InProceedings{prange2017mum,
author = {Sarah Prange and Victoria M\"uller and Daniel Buschek and Florian Alt},
booktitle = {{Proceedings of the 16th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{QuakeQuiz - A Case Study on Deploying a Playful Display Application in a Museum Context}},
year = {2017},
address = {New York, NY, USA},
note = {prange2017mum},
publisher = {Association for Computing Machinery},
series = {MUM '17},
abstract = {In this paper, we present a case study in which we designed and implemented an interactive museum exhibit. In particular, we extended a section of the museum with an interactive quiz game. The project is an example of an opportunistic deployment where the needs of different stakeholders (museum administration, visitors, researchers) and the properties of the space needed to be considered. It is also an example of how we can apply knowledge on methodology and audience behavior collected over the past years by the research community. At the focus of this paper is (1) the design and concept phase that led to the initial idea for the exhibit, (2) the implementation phase, (3) a roll-out and early insights phase where we tested and refined the application in an iterative design process on-site, and (4) the final deployment as a permanent exhibit of the museum. We hope our report to be useful for researchers and practitioners designing systems for similar contexts.},
doi = {10.1145/3152832.3152841},
location = {Stuttgart, Germany},
timestamp = {2017.11.26},
url = {http://www.florian-alt.org/unibw/wp-content/publications/prange2017mum.pdf},
}
M. Khamis, L. Bandelow, S. Schick, D. Casadevall, A. Bulling, and F. Alt. They Are All After You: Investigating the Viability of a Threat Model That Involves Multiple Shoulder Surfers. In Proceedings of the 16th International Conference on Mobile and Ubiquitous Multimedia (MUM ’17), Association for Computing Machinery, New York, NY, USA, 2017, p. 31–35. doi:10.1145/3152832.3152851
[BibTeX] [Abstract] [PDF]
Many of the authentication schemes for mobile devices that were proposed lately complicate shoulder surfing by splitting the attacker’s attention into two or more entities. For example, multimodal authentication schemes such as GazeTouchPIN and GazeTouchPass require attackers to observe the user’s gaze input and the touch input performed on the phone’s screen. These schemes have always been evaluated against single observers, while multiple observers could potentially attack these schemes with greater ease, since each of them can focus exclusively on one part of the password. In this work, we study the effectiveness of a novel threat model against authentication schemes that split the attacker’s attention. As a case study, we report on a security evaluation of two state of the art authentication schemes in the case of a team of two observers. Our results show that although multiple observers perform better against these schemes than single observers, multimodal schemes are significantly more secure against multiple observers compared to schemes that employ a single modality. We discuss how this threat model impacts the design of authentication schemes.
@InProceedings{khamis2017mum,
author = {Khamis, Mohamed and Bandelow, Linda and Schick, Stina and Casadevall, Dario and Bulling, Andreas and Alt, Florian},
booktitle = {{Proceedings of the 16th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{They Are All After You: Investigating the Viability of a Threat Model That Involves Multiple Shoulder Surfers}},
year = {2017},
address = {New York, NY, USA},
note = {khamis2017mum},
pages = {31--35},
publisher = {Association for Computing Machinery},
series = {MUM '17},
abstract = {Many of the authentication schemes for mobile devices that were proposed lately complicate shoulder surfing by splitting the attacker's attention into two or more entities. For example, multimodal authentication schemes such as GazeTouchPIN and GazeTouchPass require attackers to observe the user's gaze input and the touch input performed on the phone's screen. These schemes have always been evaluated against single observers, while multiple observers could potentially attack these schemes with greater ease, since each of them can focus exclusively on one part of the password. In this work, we study the effectiveness of a novel threat model against authentication schemes that split the attacker's attention. As a case study, we report on a security evaluation of two state of the art authentication schemes in the case of a team of two observers. Our results show that although multiple observers perform better against these schemes than single observers, multimodal schemes are significantly more secure against multiple observers compared to schemes that employ a single modality. We discuss how this threat model impacts the design of authentication schemes.},
acmid = {3152851},
doi = {10.1145/3152832.3152851},
isbn = {978-1-4503-5378-6},
keywords = {gaze gestures, multimodal authentication, multiple observers, privacy, shoulder surfing, threat model},
location = {Stuttgart, Germany},
numpages = {5},
timestamp = {2017.11.26},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2017mum.pdf},
}
M. Hassib, M. Khamis, S. Friedl, S. Schneegass, and F. Alt. BrainAtWork: Logging Cognitive Engagement and Tasks in the Workplace Using Electroencephalography. In Proceedings of the 16th International Conference on Mobile and Ubiquitous Multimedia (MUM ’17), Association for Computing Machinery, New York, NY, USA, 2017, p. 305–310. doi:10.1145/3152832.3152865
[BibTeX] [Abstract] [PDF]
Today’s workplaces are dynamic and complex. Digital data sources such as email and video conferencing aim to support workers but also add to their burden of multitasking. Psychophysiological sensors such as Electroencephalography (EEG) can provide users with cues about their cognitive state. We introduce BrainAtWork, a workplace engagement and task logger which shows users their cognitive state while working on different tasks. In a lab study with eleven participants working on their own real-world tasks, we gathered 16 hours of EEG and PC logs which were labeled into three classes: central, peripheral and meta work. We evaluated the usability of BrainAtWork via questionnaires and interviews. We investigated the correlations between measured cognitive engagement from EEG and subjective responses from experience sampling probes. Using random forests classification, we show the feasibility of automatically labeling work tasks into work classes. We discuss how BrainAtWork can support workers in the long term by encouraging reflection and helping with task scheduling.
@InProceedings{hassib2017mum,
author = {Hassib, Mariam and Khamis, Mohamed and Friedl, Susanne and Schneegass, Stefan and Alt, Florian},
booktitle = {{Proceedings of the 16th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{BrainAtWork: Logging Cognitive Engagement and Tasks in the Workplace Using Electroencephalography}},
year = {2017},
address = {New York, NY, USA},
note = {hassib2017mum},
pages = {305--310},
publisher = {Association for Computing Machinery},
series = {MUM '17},
abstract = {Today's workplaces are dynamic and complex. Digital data sources such as email and video conferencing aim to support workers but also add to their burden of multitasking. Psychophysiological sensors such as Electroencephalography (EEG) can provide users with cues about their cognitive state. We introduce BrainAtWork, a workplace engagement and task logger which shows users their cognitive state while working on different tasks. In a lab study with eleven participants working on their own real-world tasks, we gathered 16 hours of EEG and PC logs which were labeled into three classes: central, peripheral and meta work. We evaluated the usability of BrainAtWork via questionnaires and interviews. We investigated the correlations between measured cognitive engagement from EEG and subjective responses from experience sampling probes. Using random forests classification, we show the feasibility of automatically labeling work tasks into work classes. We discuss how BrainAtWork can support workers in the long term by encouraging reflection and helping with task scheduling.},
acmid = {3152865},
doi = {10.1145/3152832.3152865},
isbn = {978-1-4503-5378-6},
keywords = {EEG, multitasking, workplace logging},
location = {Stuttgart, Germany},
numpages = {6},
timestamp = {2017.11.26},
url = {http://www.florian-alt.org/unibw/wp-content/publications/hassib2017mum.pdf},
}
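The random forests classification mentioned in the abstract maps onto a standard scikit-learn pipeline. The sketch below uses synthetic placeholder features (an assumption; the study used 16 hours of real EEG and PC logs), so the cross-validated accuracy will hover near chance here; with real band-power features it would reflect the separability of the three work classes.

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(1)
# Placeholder EEG features per time window (e.g., band powers per channel).
X = rng.normal(size=(300, 8))
# Three label classes as in the paper: 0=central, 1=peripheral, 2=meta work.
y = rng.integers(0, 3, size=300)

clf = RandomForestClassifier(n_estimators=100, random_state=0)
print("cv accuracy: %.2f" % cross_val_score(clf, X, y, cv=5).mean())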
F. Alt and L. Ziegler. PD-Survey – Supporting Audience-Centric Research through Surveys on Public Display Networks. In Proceedings of the 25th International ACM Conference on Multimedia (MM’17), Association for Computing Machinery, New York, NY, USA, 2017. doi:10.1145/3123266.3123293
[BibTeX] [Abstract] [PDF]
We present PD-Survey, a platform to conduct surveys across a network of interactive screens. Our research is motivated by the fact that obtaining and analyzing data about users of public displays requires significant effort; e.g., running long-term observations or post-hoc analyses of video/interaction logs. As a result, research is often constrained to a single installation within a particular context, neither accounting for a diverse audience (children, shoppers, commuters) nor for different situations (waiting vs. passing by) or times of the day. As displays become networked, one way to address this challenge is through surveys on displays, where audience feedback is collected in-situ. Since current tools do not appropriately address the requirements of a display network, we implemented a tool for use on public displays and report on its design and development. Our research is complemented by two in-the-wild deployments that (a) investigate different channels for feedback collection, (b) showcase how the work of researchers is supported, and (c) testify that the platform can easily be extended with novel features.
@InProceedings{alt2017mm,
author = {Alt, Florian AND Ziegler, Lukas},
booktitle = {{Proceedings of the 25th International ACM Conference on Multimedia}},
title = {{PD-Survey - Supporting Audience-Centric Research through Surveys on Public Display Networks}},
year = {2017},
address = {New York, NY, USA},
note = {alt2017mm},
publisher = {Association for Computing Machinery},
series = {MM'17},
abstract = {We present PD-Survey, a platform to conduct surveys across a network of interactive screens. Our research is motivated by the fact that obtaining and analyzing data about users of public displays requires significant effort; e.g., running long-term observations or post-hoc analyses of video/interaction logs. As a result, research is often constrained to a single installation within a particular context, neither accounting for a diverse audience (children, shoppers, commuters) nor for different situations (waiting vs. passing by) or times of the day. As displays become networked, one way to address this challenge is through surveys on displays, where audience feedback is collected in-situ. Since current tools do not appropriately address the requirements of a display network, we implemented a tool for use on public displays and report on its design and development. Our research is complemented by two in-the-wild deployments that (a) investigate different channels for feedback collection, (b) showcase how the work of researchers is supported, and (c) testify that the platform can easily be extended with novel features.},
doi = {10.1145/3123266.3123293},
keywords = {public displays, surveys},
location = {Mountain View, CA, USA},
numpages = {9},
timestamp = {2017.10.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2017mm.pdf},
}
M. Khamis, M. Hassib, E. von Zezschwitz, A. Bulling, and F. Alt. GazeTouchPIN: Protecting Sensitive Data on Mobile Devices using Secure Multimodal Authentication. In Proceedings of the 19th ACM International Conference on Multimodal Interaction (ICMI 2017), Association for Computing Machinery, New York, NY, USA, 2017. doi:10.1145/3136755.3136809
[BibTeX] [Abstract] [PDF]
Although mobile devices provide access to a plethora of sensitive data, most users still only protect them with PINs or patterns, which are vulnerable to side-channel attacks (e.g., shoulder surfing). However, prior research has shown that privacy-aware users are willing to take further steps to protect their private data. We propose GazeTouchPIN, a novel secure authentication scheme for mobile devices that combines gaze and touch input. Our multimodal approach complicates shoulder-surfing attacks by requiring attackers to observe the screen as well as the user’s eyes to find the password. We evaluate the security and usability of GazeTouchPIN in two user studies (N=30). We found that while GazeTouchPIN requires longer entry times, privacy-aware users would use it on demand when feeling observed or when accessing sensitive data. The results show that the successful shoulder surfing attack rate drops from 68% to 10.4% when using GazeTouchPIN.
@InProceedings{khamis2017icmi,
author = {Khamis, Mohamed and Hassib, Mariam and von Zezschwitz, Emanuel and Bulling, Andreas and Alt, Florian},
booktitle = {{Proceedings of the 19th ACM International Conference on Multimodal Interaction}},
title = {{GazeTouchPIN: Protecting Sensitive Data on Mobile Devices using Secure Multimodal Authentication}},
year = {2017},
address = {New York, NY, USA},
note = {khamis2017icmi},
publisher = {Association for Computing Machinery},
series = {ICMI 2017},
abstract = {Although mobile devices provide access to a plethora of sensitive data, most users still only protect them with PINs or patterns, which are vulnerable to side-channel attacks (e.g., shoulder surfing). However, prior research has shown that privacy-aware users are willing to take further steps to protect their private data. We propose GazeTouchPIN, a novel secure authentication scheme for mobile devices that combines gaze and touch input. Our multimodal approach complicates shoulder-surfing attacks by requiring attackers to observe the screen as well as the user’s eyes to find the password. We evaluate the security and usability of GazeTouchPIN in two user studies (N=30). We found that while GazeTouchPIN requires longer entry times, privacy-aware users would use it on-demand when feeling observed or when accessing sensitive data. The results show that the successful shoulder-surfing attack rate drops from 68% to 10.4% when using GazeTouchPIN.},
acmid = {3136809},
doi = {10.1145/3136755.3136809},
isbn = {978-1-4503-5543-8/17/11},
location = {Glasgow, Scotland},
numpages = {5},
timestamp = {2017.10.24},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2017icmi.pdf},
}
E. Lösch, F. Alt, and M. Koch. Mirror, Mirror on the Wall: Attracting Passers-by to Public Touch Displays With User Representations. In Proceedings of the 2017 ACM International Conference on Interactive Surfaces and Spaces (ISS ’17), Association for Computing Machinery, New York, NY, USA, 2017, p. 22–31. doi:10.1145/3132272.3134129
[BibTeX] [Abstract] [PDF]
In this paper, we investigate how effectively users’ representations convey interactivity and foster interaction on large information touch displays. This research is motivated by the fact that user representations have been shown to be very efficient in playful applications that support mid-air interaction. At the same time, little is known about the effects of applying this approach to settings with a different primary mode of interaction, e.g. touch. It is also unclear how the playfulness of user representations influences the interest of users in the displayed information. To close this gap, we combine a touch display with screens showing life-sized video representations of passers-by. In a deployment, we compare different spatial arrangements to understand how passers-by are attracted and enticed to interact, how they explore the application, and how they socially behave. Findings reveal that (a) opposing displays foster interaction, but (b) may also reduce interaction at the main display; (c) a large intersection between focus and nimbus helps to notice interactivity; (d) using playful elements at information displays is not counterproductive; (e) mixed interaction modalities are hard to understand.
@InProceedings{loesch2017iss,
author = {L\"{o}sch, Eva and Alt, Florian and Koch, Michael},
booktitle = {{Proceedings of the 2017 ACM International Conference on Interactive Surfaces and Spaces}},
title = {{Mirror, Mirror on the Wall: Attracting Passers-by to Public Touch Displays With User Representations}},
year = {2017},
address = {New York, NY, USA},
note = {loesch2017iss},
pages = {22--31},
publisher = {Association for Computing Machinery},
series = {ISS '17},
abstract = {In this paper, we investigate how effectively users' representations convey interactivity and foster interaction on large information touch displays. This research is motivated by the fact that user representations have been shown to be very efficient in playful applications that support mid-air interaction. At the same time, little is known about the effects of applying this approach to settings with a different primary mode of interaction, e.g. touch. It is also unclear how the playfulness of user representations influences the interest of users in the displayed information. To close this gap, we combine a touch display with screens showing life-sized video representations of passers-by. In a deployment, we compare different spatial arrangements to understand how passers-by are attracted and enticed to interact, how they explore the application, and how they socially behave. Findings reveal that (a) opposing displays foster interaction, but (b) may also reduce interaction at the main display; (c) a large intersection between focus and nimbus helps to notice interactivity; (d) using playful elements at information displays is not counterproductive; (e) mixed interaction modalities are hard to understand.},
acmid = {3134129},
doi = {10.1145/3132272.3134129},
isbn = {978-1-4503-4691-7},
keywords = {Public Displays, Touch Interaction, User Representations},
location = {Brighton, United Kingdom},
numpages = {10},
timestamp = {2017.10.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/loesch2017iss.pdf},
}
D. Buschek, J. Kinshofer, and F. Alt. A Comparative Evaluation of Spatial Targeting Behaviour Patterns for Finger and Stylus Tapping on Mobile Touchscreen Devices. Proc. ACM Interact. Mob. Wearable Ubiquitous Technol., vol. 1, iss. 4, p. 126:1–126:21, 2017. doi:10.1145/3161160
[BibTeX] [Abstract] [PDF]
Models of 2D targeting error patterns have been applied as a valuable computational tool for analysing finger touch behaviour on mobile devices, improving touch accuracy and inferring context. However, their use in stylus input is yet unexplored. This paper presents the first empirical study and analyses of such models for tapping with a stylus. In a user study (N = 28), we collected targeting data on a smartphone, both for stationary use (sitting) and walking. We compare targeting patterns between index finger input and three stylus variations – two stylus widths and nib types as well as the addition of a hover cursor. Our analyses reveal that stylus targeting patterns are user-specific, and that offset models improve stylus tapping accuracy, but less so than for finger touch. Input method has a stronger influence on targeting patterns than mobility, and stylus width is more influential than the hover cursor. Stylus models improve finger accuracy as well, but not vice versa. The extent of the stylus accuracy advantage compared to the finger depends on screen location and mobility. We also discuss patterns related to mobility and gliding of the stylus on the screen. We conclude with implications for target sizes and offset model applications.
@Article{buschek2017imwut,
author = {Buschek, Daniel and Kinshofer, Julia and Alt, Florian},
journal = {{Proc. ACM Interact. Mob. Wearable Ubiquitous Technol.}},
title = {{A Comparative Evaluation of Spatial Targeting Behaviour Patterns for Finger and Stylus Tapping on Mobile Touchscreen Devices}},
year = {2017},
issn = {2474-9567},
month = jan,
note = {buschek2017imwut},
number = {4},
pages = {126:1--126:21},
volume = {1},
abstract = {Models of 2D targeting error patterns have been applied as a valuable computational tool for analysing finger touch behaviour on mobile devices, improving touch accuracy and inferring context. However, their use in stylus input is yet unexplored. This paper presents the first empirical study and analyses of such models for tapping with a stylus. In a user study (N = 28), we collected targeting data on a smartphone, both for stationary use (sitting) and walking. We compare targeting patterns between index finger input and three stylus variations -- two stylus widths and nib types as well as the addition of a hover cursor. Our analyses reveal that stylus targeting patterns are user-specific, and that offset models improve stylus tapping accuracy, but less so than for finger touch. Input method has a stronger influence on targeting patterns than mobility, and stylus width is more influential than the hover cursor. Stylus models improve finger accuracy as well, but not vice versa. The extent of the stylus accuracy advantage compared to the finger depends on screen location and mobility. We also discuss patterns related to mobility and gliding of the stylus on the screen. We conclude with implications for target sizes and offset model applications.},
acmid = {3161160},
address = {New York, NY, USA},
articleno = {126},
doi = {10.1145/3161160},
issue_date = {December 2017},
keywords = {Gaussian Process regression, Stylus input, computational interaction, offset model},
numpages = {21},
publisher = {Association for Computing Machinery},
timestamp = {2017.10.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2017imwut.pdf},
}
M. Koch and F. Alt, “Allgegenwärtige Mensch-Computer-Interaktion,” in 50 jahre universitäts-informatik in münchen, A. Bode, M. Broy, H. Bungartz, and F. Matthes, Eds., Berlin, Heidelberg: Springer Berlin Heidelberg, 2017, p. 11–31. doi:10.1007/978-3-662-54712-0_2
[BibTeX] [Abstract] [PDF]
Computers pervade our everyday lives. They are so deeply embedded in our environment that we no longer perceive them as such. This creates the need to design immediately understandable user interfaces, both for individuals and for groups of users. Human-computer interaction is the subfield of computer science that deals with this challenge. This chapter first provides a brief introduction to the research methodology of HCI and then gives an insight into the research activities on this topic at the Munich universities. The focus is on work on public displays, gaze interaction in public spaces, and the development of authentication schemes that are both secure and usable.
@InBook{koch2017informatikmuenchen,
author = {Koch, Michael and Alt, Florian},
editor = {Bode, Arndt and Broy, Manfred and Bungartz, Hans-Joachim and Matthes, Florian},
pages = {11--31},
publisher = {Springer Berlin Heidelberg},
title = {{Allgegenw{\"a}rtige Mensch-Computer-Interaktion}},
year = {2017},
address = {Berlin, Heidelberg},
isbn = {978-3-662-54712-0},
note = {koch2017informatikmuenchen},
abstract = {Computer durchdringen unseren Alltag. Dabei sind diese derart in unsere Umgebung eingebettet, dass diese von uns nicht mehr als solche wahrgenommen werden. Hierdurch entsteht die Notwendigkeit zur Schaffung unmittelbar verständlicher Benutzerschnittstellen – sowohl für Individuen als auch für Gruppen von Benutzern. Mit diesem Teilbereich der Informatik beschäftigt sich die Mensch‐Computer‐Interaktion. Dieser Beitrag bietet zunächst eine kurze Einführung in die Forschungsmethodik der MCI und gibt einen Einblick in die Forschungsaktivitäten zu diesem Thema an den Münchner Universitäten. Im Fokus stehen hierbei Arbeiten zu öffentlichen Bildschirmen, Blickinteraktion im öffentlichen Raum, sowie die Entwicklung sicherer und gleichzeitig benutzbarer Authentifizierungsverfahren.},
booktitle = {50 Jahre Universit{\"a}ts-Informatik in M{\"u}nchen},
doi = {10.1007/978-3-662-54712-0_2},
timestamp = {2017.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/koch2017informatikmuenchen.pdf},
}
M. Koch and F. Alt. Allgegenwärtige Mensch-Computer-Interaktion. Informatik-Spektrum, p. 1–6, 2017. doi:10.1007/s00287-017-1027-4
[BibTeX] [Abstract] [PDF]
Computers pervade our everyday lives. They are so deeply embedded in our environment that we no longer perceive them as such. This creates the need to design immediately understandable user interfaces, both for individuals and for groups of users. Human-machine interaction is the subfield of computer science that deals with this challenge. This article gives an insight into the research activities on this topic at the Munich universities. The focus is on work on public displays, gaze interaction in public spaces, and the development of authentication schemes that are both secure and usable.
@Article{koch2017informatikspektrum,
author = {Koch, Michael and Alt, Florian},
journal = {{Informatik-Spektrum}},
title = {{Allgegenw{\"a}rtige Mensch-Computer-Interaktion}},
year = {2017},
issn = {1432-122X},
note = {koch2017informatikspektrum},
pages = {1--6},
abstract = {Computer durchdringen unseren Alltag. Dabei sind diese derart in unsere Umgebung eingebettet, dass diese von uns nicht mehr als solche wahrgenommen werden. Hierdurch entsteht die Notwendigkeit zur Schaffung unmittelbar verst{\"a}ndlicher Benutzerschnittstellen -- sowohl f{\"u}r Individuen als auch f{\"u}r Gruppen von Benutzern. Mit diesem Teilbereich der Informatik besch{\"a}ftigt sich die Mensch-Maschine-Interaktion. Dieser Artikel gibt einen Einblick in die Forschungsaktivit{\"a}ten zu diesem Thema an den M{\"u}nchner Universit{\"a}ten. Im Fokus stehen hierbei Arbeiten zu {\"o}ffentlichen Bildschirmen, Blickinteraktion im {\"o}ffentlichen Raum sowie die Entwicklung sicherer und gleichzeitig benutzbarer Authentifizierungsverfahren.},
doi = {10.1007/s00287-017-1027-4},
timestamp = {2017.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/koch2017informatikspektrum.pdf},
}
D. Buschek, M. Hackenschmied, and F. Alt. Dynamic UI Adaptations for One-Handed Use of Large Mobile Touchscreen Devices. In Proceedings of the IFIP Conference on Human-Computer Interaction (INTERACT’17), 2017, p. 184–201. doi:10.1007/978-3-319-67687-6_13
[BibTeX] [Abstract] [PDF]
We present and evaluate dynamic adaptations for mobile touch GUIs. They mitigate reachability problems that users face when operating large smartphones or "phablets" with a single hand. In particular, we enhance common touch GUI elements with three simple animated location and orientation changes (Roll, Bend, Move). Users can trigger them to move GUI elements within comfortable reach. A lab study (N=35) with two devices (4.95 in, 5.9 in) shows that these adaptations improve reachability on the larger device. They also reduce device movements required to reach the targets. Participants perceived adapted UIs as faster, less exhausting and more comfortable to use than the baselines. Feedback and video analyses also indicate that participants retained a safer grip on the device through our adaptations. We conclude with design implications for (adaptive) touch GUIs on large devices.
@InProceedings{buschek2017interact,
author = {Buschek, Daniel and Hackenschmied, Maximilian and Alt, Florian},
booktitle = {{Proceedings of the IFIP Conference on Human-Computer Interaction}},
title = {{Dynamic UI Adaptations for One-Handed Use of Large Mobile Touchscreen Devices}},
year = {2017},
note = {buschek2017interact},
organization = {Springer},
pages = {184--201},
series = {INTERACT'17},
abstract = {We present and evaluate dynamic adaptations for mobile touch GUIs. They mitigate reachability problems that users face when operating large smartphones or ``phablets'' with a single hand. In particular, we enhance common touch GUI elements with three simple animated location and orientation changes (Roll, Bend, Move). Users can trigger them to move GUI elements within comfortable reach. A lab study (N=35) with two devices (4.95 in, 5.9 in) shows that these adaptations improve reachability on the larger device. They also reduce device movements required to reach the targets. Participants perceived adapted UIs as faster, less exhausting and more comfortable to use than the baselines. Feedback and video analyses also indicate that participants retained a safer grip on the device through our adaptations. We conclude with design implications for (adaptive) touch GUIs on large devices.},
doi = {10.1007/978-3-319-67687-6_13},
timestamp = {2017.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2017interact.pdf},
}
A. Colley, J. Häkkilä, B. Pfleging, and F. Alt. A Design Space for External Displays on Cars. In Proceedings of the 9th International Conference on Automotive User Interfaces and Interactive Vehicular Applications Adjunct (AutomotiveUI ’17), Association for Computing Machinery, New York, NY, USA, 2017, p. 146–151. doi:10.1145/3131726.3131760
[BibTeX] [Abstract] [PDF]
The exterior surfaces of cars provide so far unutilized opportunities for information display. The exploitation of this space is enabled by current advances in display technologies combined with increased sensor integration, computing power, and connectivity in vehicles. With this motivation, we present a framework, mapping the design space for external vehicle displays. The audience for the displayed information may be other road users, pedestrians, or autonomous systems. This design direction is particularly interesting in the future, as the current direction towards driverless vehicles may be an enabler for increased separation, redesign, and repurposing of vehicle interior and exterior surfaces.
@InProceedings{colley2017autoui,
author = {Colley, Ashley and H\"{a}kkil\"{a}, Jonna and Pfleging, Bastian and Alt, Florian},
booktitle = {{Proceedings of the 9th International Conference on Automotive User Interfaces and Interactive Vehicular Applications Adjunct}},
title = {{A Design Space for External Displays on Cars}},
year = {2017},
address = {New York, NY, USA},
note = {colley2017autoui},
pages = {146--151},
publisher = {Association for Computing Machinery},
series = {AutomotiveUI '17},
abstract = {The exterior surfaces of cars provide so far unutilized opportunities for information display. The exploitation of this space is enabled by current advances in display technologies combined with increased sensor integration, computing power, and connectivity in vehicles. With this motivation, we present a framework, mapping the design space for external vehicle displays. The audience for the displayed information may be other road users, pedestrians, or autonomous systems. This design direction is particularly interesting in the future, as the current direction towards driverless vehicles may be an enabler for increased separation, redesign, and repurposing of vehicle interior and exterior surfaces.},
acmid = {3131760},
doi = {10.1145/3131726.3131760},
isbn = {978-1-4503-5151-5},
keywords = {Automotive UI, cars, design space, interactive surfaces, public displays},
location = {Oldenburg, Germany},
numpages = {6},
timestamp = {2017.09.22},
url = {http://www.florian-alt.org/unibw/wp-content/publications/colley2017autoui.pdf},
}
L. Trotter, C. Mai, and F. Alt. CarSketch: A Collaborative Sketching Table with Self-Propelled Tangible Objects for Automotive Applications. In Proceedings of the 9th International Conference on Automotive User Interfaces and Interactive Vehicular Applications Adjunct (AutomotiveUI ’17), Association for Computing Machinery, New York, NY, USA, 2017, p. 126–130. doi:10.1145/3131726.3131749
[BibTeX] [Abstract] [PDF]
We present CarSketch, a concept and prototype of a collaborative sketching table that supports interdisciplinary development teams during the early development phase of driver assistance systems. Due to the high costs caused by the use of physical prototypes, simulation is a common approach. Yet, the operation of state-of-the-art simulations is restricted to specialists, leaving the majority of stakeholders as passive observers. Our system serves as a collaborative and multi-perspective communication tool, enabling all participants to interact with the simulation. In particular, it (1) structures the ideation and development by providing a distraction-free environment with an easy-to-use drawing interface, (2) which is used by self-propelled tangibles to monitor and influence the simulation. (3) Additional information is provided by personal augmentation and (4) the simulation can be replayed in an immersive 3D environment. We expect the tool to be useful for multidisciplinary teams in fostering the ideation phase and finding conceptual mistakes more efficiently.
@InProceedings{trotter2017autouiadj,
author = {Trotter, Ludwig and Mai, Christian and Alt, Florian},
booktitle = {{Proceedings of the 9th International Conference on Automotive User Interfaces and Interactive Vehicular Applications Adjunct}},
title = {{CarSketch: A Collaborative Sketching Table with Self-Propelled Tangible Objects for Automotive Applications}},
year = {2017},
address = {New York, NY, USA},
note = {trotter2017autouiadj},
pages = {126--130},
publisher = {Association for Computing Machinery},
series = {AutomotiveUI '17},
abstract = {We present CarSketch, a concept and prototype of a collaborative sketching table that supports interdisciplinary development teams during the early development phase of driver assistance systems. Due to the high costs caused by the use of physical prototypes, simulation is a common approach. Yet, the operation of state-of-the-art simulations is restricted to specialists, leaving the majority of stakeholders as passive observers. Our system serves as a collaborative and multi-perspective communication tool, enabling all participants to interact with the simulation. In particular, it (1) structures the ideation and development by providing a distraction-free environment with an easy-to-use drawing interface, (2) which is used by self-propelled tangibles to monitor and influence the simulation. (3) Additional information is provided by personal augmentation and (4) the simulation can be replayed in an immersive 3D environment. We expect the tool to be useful for multidisciplinary teams in fostering the ideation phase and finding conceptual mistakes more efficiently.},
acmid = {3131749},
doi = {10.1145/3131726.3131749},
isbn = {978-1-4503-5151-5},
keywords = {Automotive, collaborative work, simulation},
location = {Oldenburg, Germany},
numpages = {5},
timestamp = {2017.09.22},
url = {http://www.florian-alt.org/unibw/wp-content/publications/trotter2017autouiadj.pdf},
}
M. Braun, N. Broy, B. Pfleging, and F. Alt. A Design Space for Conversational In-vehicle Information Systems. In Proceedings of the 19th International Conference on Human-Computer Interaction with Mobile Devices and Services (MobileHCI ’17), Association for Computing Machinery, New York, NY, USA, 2017, p. 79:1–79:8. doi:10.1145/3098279.3122122
[BibTeX] [Abstract] [PDF]
In this paper we chart a design space for conversational in-vehicle information systems (IVIS). Our work is motivated by the proliferation of speech interfaces in our everyday life, which have already found their way into consumer electronics and will most likely become pervasive in future cars. Our design space is based on expert interviews as well as a comprehensive literature review. We present five core dimensions – assistant, position, dialog design, system capabilities, and driver state – and show in an initial study how these dimensions affect the design of a prototypical IVIS. Design spaces have paved the way for much of the work done in HCI including but not limited to areas such as input and pointing devices, smart phones, displays, and automotive UIs. In a similar way, we expect our design space to aid practitioners in designing future IVIS but also researchers as they explore this young area of research.
@InProceedings{braun2017mobilehciadj,
author = {Braun, Michael and Broy, Nora and Pfleging, Bastian and Alt, Florian},
booktitle = {{Proceedings of the 19th International Conference on Human-Computer Interaction with Mobile Devices and Services}},
title = {{A Design Space for Conversational In-vehicle Information Systems}},
year = {2017},
address = {New York, NY, USA},
note = {braun2017mobilehciadj},
pages = {79:1--79:8},
publisher = {Association for Computing Machinery},
series = {MobileHCI '17},
abstract = {In this paper we chart a design space for conversational in-vehicle information systems (IVIS). Our work is motivated by the proliferation of speech interfaces in our everyday life, which have already found their way into consumer electronics and will most likely become pervasive in future cars. Our design space is based on expert interviews as well as a comprehensive literature review. We present five core dimensions - assistant, position, dialog design, system capabilities, and driver state - and show in an initial study how these dimensions affect the design of a prototypical IVIS. Design spaces have paved the way for much of the work done in HCI including but not limited to areas such as input and pointing devices, smart phones, displays, and automotive UIs. In a similar way, we expect our design space to aid practitioners in designing future IVIS but also researchers as they explore this young area of research.},
acmid = {3122122},
articleno = {79},
doi = {10.1145/3098279.3122122},
isbn = {978-1-4503-5075-4},
keywords = {automotive user interfaces, design space, natural language interfaces, speech interaction},
location = {Vienna, Austria},
numpages = {8},
timestamp = {2017.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2017mobilehciadj.pdf},
}
V. Gentile, M. Khamis, S. Sorce, and F. Alt. They are looking at me! Understanding how Audience Presence Impacts on Public Display Users. In Proceedings of the 2017 ACM International Symposium on Pervasive Displays (PerDis ’17), Association for Computing Machinery, New York, NY, USA, 2017. doi:10.1145/3078810.3078822
[BibTeX] [Abstract] [PDF]
It is well known from prior work that people interacting as well as attending to a public display attract further people to interact. This behavior is commonly referred to as the honeypot effect. At the same time, there are often situations where an audience is present in the vicinity of a public display that does not actively engage or pay attention to the display or an approaching user. However, it is largely unknown how such a passive audience impacts on users or people who intend to interact. In this paper, we investigate the influence of a passive audience on the engagement of people with a public display. In more detail, we report on the deployment of a display in a public space. We collected and analyzed video logs to understand how people react to a passive audience in the vicinity of public displays. We found an influence on where interacting users position themselves relative to both display and passive audience as well as on their behavior. Our findings are valuable for display providers and space owners who want to maximize the display’s benefits.
@InProceedings{gentile2017perdis,
author = {Gentile, Vito and Khamis, Mohamed and Sorce, Salvatore and Alt, Florian},
booktitle = {{Proceedings of the 2017 ACM International Symposium on Pervasive Displays}},
title = {{They are looking at me! Understanding how Audience Presence Impacts on Public Display Users}},
year = {2017},
address = {New York, NY, USA},
note = {gentile2017perdis},
publisher = {Association for Computing Machinery},
series = {PerDis '17},
abstract = {It is well known from prior work that people interacting as well as attending to a public display attract further people to interact. This behavior is commonly referred to as the honeypot effect. At the same time, there are often situations where an audience is present in the vicinity of a public display that does not actively engage or pay attention to the display or an approaching user. However, it is largely unknown how such a passive audience impacts on users or people who intend to interact. In this paper, we investigate the influence of a passive audience on the engagement of people with a public display. In more detail, we report on the deployment of a display in a public space. We collected and analyzed video logs to understand how people react to a passive audience in the vicinity of public displays. We found an influence on where interacting users position themselves relative to both display and passive audience as well as on their behavior. Our findings are valuable for display providers and space owners who want to maximize the display’s benefits.},
acmid = {3078822},
doi = {10.1145/3078810.3078822},
isbn = {978-1-4503-5045-7/17/06},
location = {Lugano, Switzerland},
numpages = {9},
timestamp = {2017.06.06},
url = {http://www.florian-alt.org/unibw/wp-content/publications/gentile2017perdis.pdf},
}
D. Huber, D. Buschek, and F. Alt. Don’t Leave: Combining Sensing Technology and Second Screens to Enhance the User Experience with TV Content. In Proceedings of the 2017 ACM International Conference on Interactive Experiences for TV and Online Video (TVX ’17), Association for Computing Machinery, New York, NY, USA, 2017, p. 115–121. doi:10.1145/3077548.3077561
[BibTeX] [Abstract] [PDF]
In this paper we explore how the use of sensing technologies can enhance people’s experience while consuming TV content. The work is motivated by an increasing number of sensors (such as Kinect) that find their way into living rooms. Such sensors allow the behavior of viewers to be analyzed, hence providing the opportunity to instantly react to this behavior. The particular idea we explore in our work is how a second screen app triggered by the viewer’s behavior can be designed to make them re-engage with the TV content. At the outset of our work we conducted a survey (N=411) to assess viewers’ activities while watching TV. Based on the findings we implemented a Kinect-based system to detect these activities and connected it with a playful second screen app. We then conducted a field evaluation (N=20) where we compared (a) four hints to direct users’ attention to the second screen app and (b) four types of second screen content requiring different levels of engagement. We conclude with implications for both practitioners and researchers concerned with interactive TV.
@InProceedings{huber2017tvx,
author = {Huber, Daniela and Buschek, Daniel and Alt, Florian},
booktitle = {{Proceedings of the 2017 ACM International Conference on Interactive Experiences for TV and Online Video}},
title = {{Don't Leave: Combining Sensing Technology and Second Screens to Enhance the User Experience with TV Content}},
year = {2017},
address = {New York, NY, USA},
note = {huber2017tvx},
pages = {115--121},
publisher = {Association for Computing Machinery},
series = {TVX '17},
abstract = {In this paper we explore how the use of sensing technologies can enhance people's experience while consuming TV content. The work is motivated by an increasing number of sensors (such as Kinect) that find their way into living rooms. Such sensors allow the behavior of viewers to be analyzed, hence providing the opportunity to instantly react to this behavior. The particular idea we explore in our work is how a second screen app triggered by the viewer's behavior can be designed to make them re-engage with the TV content. At the outset of our work we conducted a survey (N=411) to assess viewers' activities while watching TV. Based on the findings we implemented a Kinect-based system to detect these activities and connected it with a playful second screen app. We then conducted a field evaluation (N=20) where we compared (a) four hints to direct users' attention to the second screen app and (b) four types of second screen content requiring different levels of engagement. We conclude with implications for both practitioners and researchers concerned with interactive TV.},
acmid = {3077561},
doi = {10.1145/3077548.3077561},
isbn = {978-1-4503-4529-3},
keywords = {advertisements, interactive tv, kinect, user behavior},
location = {Hilversum, The Netherlands},
numpages = {7},
timestamp = {2017.05.24},
url = {http://www.florian-alt.org/unibw/wp-content/publications/huber2017tvx.pdf},
}
Y. Abdelrahman, M. Khamis, S. Schneegass, and F. Alt. Stay Cool! Understanding Thermal Attacks on Mobile-based User Authentication. In Proceedings of the 35th Annual ACM Conference on Human Factors in Computing Systems (CHI ’17), Association for Computing Machinery, New York, NY, USA, 2017. doi:10.1145/3025453.3025461
[BibTeX] [Abstract] [PDF] [Video]
PINs and patterns remain among the most widely used knowledge-based authentication schemes. As thermal cameras become ubiquitous and affordable, we foresee a new form of threat to user privacy on mobile devices. Thermal cameras allow performing thermal attacks, where heat traces, resulting from authentication, can be used to reconstruct passwords. In this work we investigate in detail the viability of exploiting thermal imaging to infer PINs and patterns on mobile devices. We present a study (N=18) in which we evaluated how properties of PINs and patterns influence their resistance to thermal attacks. We found that thermal attacks are indeed viable on mobile devices; overlapping patterns significantly decrease the successful thermal attack rate from 100% to 16.67%, while PINs remain vulnerable (>72% success rate) even with duplicate digits. We conclude with recommendations for users and designers of authentication schemes on how to resist thermal attacks.
@InProceedings{abdelrahman2017chi,
author = {Abdelrahman, Yomna and Khamis, Mohamed and Schneegass, Stefan and Alt, Florian},
booktitle = {{Proceedings of the 35th Annual ACM Conference on Human Factors in Computing Systems}},
title = {{Stay Cool! Understanding Thermal Attacks on Mobile-based User Authentication}},
year = {2017},
address = {New York, NY, USA},
note = {abdelrahman2017chi},
publisher = {Association for Computing Machinery},
series = {CHI '17},
abstract = {PINs and patterns remain among the most widely used knowledge-based authentication schemes. As thermal cameras become ubiquitous and affordable, we foresee a new form of threat to user privacy on mobile devices. Thermal cameras allow performing thermal attacks, where heat traces, resulting from authentication, can be used to reconstruct passwords. In this work we investigate in detail the viability of exploiting thermal imaging to infer PINs and patterns on mobile devices. We present a study (N=18) in which we evaluated how properties of PINs and patterns influence their resistance to thermal attacks. We found that thermal attacks are indeed viable on mobile devices; overlapping patterns significantly decrease the successful thermal attack rate from 100% to 16.67%, while PINs remain vulnerable (>72% success rate) even with duplicate digits. We conclude with recommendations for users and designers of authentication schemes on how to resist thermal attacks.},
doi = {10.1145/3025453.3025461},
location = {Denver, CO, USA},
timestamp = {2017.05.12},
url = {http://www.florian-alt.org/unibw/wp-content/publications/abdelrahman2017chi.pdf},
video = {abdelrahman2017chi},
}
M. Eiband, M. Khamis, E. von Zezschwitz, H. Hussmann, and F. Alt. Understanding Shoulder Surfing in the Wild: Stories from Users and Observers. In Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems (CHI ’17), Association for Computing Machinery, New York, NY, USA, 2017, p. 4254–4265. doi:10.1145/3025453.3025636
[BibTeX] [Abstract] [PDF]
Research has brought forth a variety of authentication systems to mitigate observation attacks. However, there is little work about shoulder surfing situations in the real world. We present the results of a user survey (N=174) in which we investigate actual stories about shoulder surfing on mobile devices from both users and observers. Our analysis indicates that shoulder surfing mainly occurs in an opportunistic, non-malicious way. It usually does not have serious consequences, but evokes negative feelings for both parties, resulting in a variety of coping strategies. Observed data was personal in most cases and ranged from information about interests and hobbies to login data and intimate details about third persons and relationships. Thus, our work contributes evidence for shoulder surfing in the real world and informs implications for the design of privacy protection mechanisms.
@InProceedings{eiband2017chi,
author = {Eiband, Malin and Khamis, Mohamed and von Zezschwitz, Emanuel and Hussmann, Heinrich and Alt, Florian},
booktitle = {{Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems}},
title = {{Understanding Shoulder Surfing in the Wild: Stories from Users and Observers}},
year = {2017},
address = {New York, NY, USA},
note = {eiband2017chi},
pages = {4254--4265},
publisher = {Association for Computing Machinery},
series = {CHI '17},
abstract = {Research has brought forth a variety of authentication systems to mitigate observation attacks. However, there is little work about shoulder surfing situations in the real world. We present the results of a user survey (N=174) in which we investigate actual stories about shoulder surfing on mobile devices from both users and observers. Our analysis indicates that shoulder surfing mainly occurs in an opportunistic, non-malicious way. It usually does not have serious consequences, but evokes negative feelings for both parties, resulting in a variety of coping strategies. Observed data was personal in most cases and ranged from information about interests and hobbies to login data and intimate details about third persons and relationships. Thus, our work contributes evidence for shoulder surfing in the real world and informs implications for the design of privacy protection mechanisms.},
acmid = {3025636},
doi = {10.1145/3025453.3025636},
isbn = {978-1-4503-4655-9},
keywords = {mobile devices, privacy, shoulder surfing},
location = {Denver, Colorado, USA},
numpages = {12},
timestamp = {2017.05.12},
url = {http://www.florian-alt.org/unibw/wp-content/publications/eiband2017chi.pdf},
}
M. Hassib, S. Schneegass, P. Eiglsperger, N. Henze, A. Schmidt, and F. Alt. EngageMeter: A System for Implicit Audience Engagement Sensing Using Electroencephalography. In Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems (CHI ’17), Association for Computing Machinery, New York, NY, USA, 2017, p. 5114–5119. doi:10.1145/3025453.3025669
[BibTeX] [Abstract] [PDF]
Obtaining information about audience engagement in presentations is a valuable asset for presenters in many domains. Prior literature mostly utilized explicit methods of collecting feedback which induce distractions, add workload on audience and do not provide objective information to presenters. We present EngageMeter – a system that allows fine-grained information on audience engagement to be obtained implicitly from multiple brain-computer interfaces (BCI) and to be fed back to presenters for real time and post-hoc access. Through evaluation during an HCI conference (Naudience=11, Npresenters=3) we found that EngageMeter provides value to presenters (a) in real-time, since it allows reacting to current engagement scores by changing tone or adding pauses, and (b) in post-hoc, since presenters can adjust their slides and embed extra elements. We discuss how EngageMeter can be used in collocated and distributed audience sensing as well as how it can aid presenters in long term use.
@InProceedings{hassib2017chi2,
author = {Hassib, Mariam and Schneegass, Stefan and Eiglsperger, Philipp and Henze, Niels and Schmidt, Albrecht and Alt, Florian},
booktitle = {{Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems}},
title = {{EngageMeter: A System for Implicit Audience Engagement Sensing Using Electroencephalography}},
year = {2017},
address = {New York, NY, USA},
note = {hassib2017chi2},
pages = {5114--5119},
publisher = {Association for Computing Machinery},
series = {CHI '17},
abstract = {Obtaining information about audience engagement in presentations is a valuable asset for presenters in many domains. Prior literature mostly utilized explicit methods of collecting feedback which induce distractions, add workload on audience and do not provide objective information to presenters. We present EngageMeter - a system that allows fine-grained information on audience engagement to be obtained implicitly from multiple brain-computer interfaces (BCI) and to be fed back to presenters for real time and post-hoc access. Through evaluation during an HCI conference (Naudience=11, Npresenters=3) we found that EngageMeter provides value to presenters (a) in real-time, since it allows reacting to current engagement scores by changing tone or adding pauses, and (b) in post-hoc, since presenters can adjust their slides and embed extra elements. We discuss how EngageMeter can be used in collocated and distributed audience sensing as well as how it can aid presenters in long term use.},
acmid = {3025669},
doi = {10.1145/3025453.3025669},
isbn = {978-1-4503-4655-9},
keywords = {audience feedback, bci, eeg, physiological sensing},
location = {Denver, Colorado, USA},
numpages = {6},
timestamp = {2017.05.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/hassib2017chi2.pdf},
}
M. Hassib, D. Buschek, P. W. Wozniak, and F. Alt. HeartChat: Heart Rate Augmented Mobile Chat to Support Empathy and Awareness. In Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems (CHI ’17), Association for Computing Machinery, New York, NY, USA, 2017, p. 2239–2251. doi:10.1145/3025453.3025758
[BibTeX] [Abstract] [PDF]
Textual communication via mobile phones suffers from a lack of context and emotional awareness. We present a mobile chat application, HeartChat, which integrates heart rate as a cue to increase awareness and empathy. Through a literature review and a focus group, we identified design dimensions important for heart rate augmented chats. We created three concepts showing heart rate per message, in real-time, or sending it explicitly. We tested our system in a two-week in-the-wild study with 14 participants (7 pairs). Interviews and questionnaires showed that HeartChat supports empathy between people, in particular close friends and partners. Sharing heart rate helped them to implicitly understand each other’s context (e.g. location, physical activity) and emotional state, and sparked curiosity on special occasions. We discuss opportunities, challenges, and design implications for enriching mobile chats with physiological sensing.
@InProceedings{hassib2017chi1,
author = {Hassib, Mariam and Buschek, Daniel and Wozniak, Pawe\l W. and Alt, Florian},
booktitle = {{Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems}},
title = {{HeartChat: Heart Rate Augmented Mobile Chat to Support Empathy and Awareness}},
year = {2017},
address = {New York, NY, USA},
note = {hassib2017chi1},
pages = {2239--2251},
publisher = {Association for Computing Machinery},
series = {CHI '17},
abstract = {Textual communication via mobile phones suffers from a lack of context and emotional awareness. We present a mobile chat application, HeartChat, which integrates heart rate as a cue to increase awareness and empathy. Through a literature review and a focus group, we identified design dimensions important for heart rate augmented chats. We created three concepts showing heart rate per message, in real-time, or sending it explicitly. We tested our system in a two-week in-the-wild study with 14 participants (7 pairs). Interviews and questionnaires showed that HeartChat supports empathy between people, in particular close friends and partners. Sharing heart rate helped them to implicitly understand each other's context (e.g. location, physical activity) and emotional state, and sparked curiosity on special occasions. We discuss opportunities, challenges, and design implications for enriching mobile chats with physiological sensing.},
acmid = {3025758},
doi = {10.1145/3025453.3025758},
isbn = {978-1-4503-4655-9},
keywords = {affective computing, heart rate, instant messaging, physiological sensing},
location = {Denver, Colorado, USA},
numpages = {13},
timestamp = {2017.05.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/hassib2017chi1.pdf},
}
M. Hassib, M. Pfeiffer, S. Schneegass, M. Rohs, and F. Alt. Emotion Actuator: Embodied Emotional Feedback Through Electroencephalography and Electrical Muscle Stimulation. In Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems (CHI ’17), Association for Computing Machinery, New York, NY, USA, 2017, p. 6133–6146. doi:10.1145/3025453.3025953
[BibTeX] [Abstract] [PDF]
The human body reveals emotional and bodily states through measurable signals, such as body language and electroencephalography. However, such manifestations are difficult to communicate to others remotely. We propose EmotionActuator, a proof-of-concept system to investigate the transmission of emotional states in which the recipient performs emotional gestures to understand and interpret the state of the sender. We call this kind of communication embodied emotional feedback, and present a prototype implementation. To realize our concept we chose four emotional states: amused, sad, angry, and neutral. We designed EmotionActuator through a series of studies to assess emotional classification via EEG, and create an EMS gesture set by comparing composed gestures from the literature to sign-language gestures. Through a final study with the end-to-end prototype, interviews revealed that participants like implicit sharing of emotions and find the embodied output to be immersive, but want to have control over shared emotions and with whom. This work contributes a proof-of-concept system and set of design recommendations for designing embodied emotional feedback systems.
@InProceedings{hassib2017chi3,
author = {Hassib, Mariam and Pfeiffer, Max and Schneegass, Stefan and Rohs, Michael and Alt, Florian},
booktitle = {{Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems}},
title = {{Emotion Actuator: Embodied Emotional Feedback Through Electroencephalography and Electrical Muscle Stimulation}},
year = {2017},
address = {New York, NY, USA},
note = {hassib2017chi3},
pages = {6133--6146},
publisher = {Association for Computing Machinery},
series = {CHI '17},
abstract = {The human body reveals emotional and bodily states through measurable signals, such as body language and electroencephalography. However, such manifestations are difficult to communicate to others remotely. We propose EmotionActuator, a proof-of-concept system to investigate the transmission of emotional states in which the recipient performs emotional gestures to understand and interpret the state of the sender. We call this kind of communication embodied emotional feedback, and present a prototype implementation. To realize our concept we chose four emotional states: amused, sad, angry, and neutral. We designed EmotionActuator through a series of studies to assess emotional classification via EEG, and create an EMS gesture set by comparing composed gestures from the literature to sign-language gestures. Through a final study with the end-to-end prototype, interviews revealed that participants like implicit sharing of emotions and find the embodied output to be immersive, but want to have control over shared emotions and with whom. This work contributes a proof-of-concept system and set of design recommendations for designing embodied emotional feedback systems.},
acmid = {3025953},
doi = {10.1145/3025453.3025953},
isbn = {978-1-4503-4655-9},
keywords = {affect display, affective computing, eeg., emotion, emotion sharing, ems},
location = {Denver, Colorado, USA},
numpages = {14},
timestamp = {2017.05.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/hassib2017chi3.pdf},
}
D. Buschek and F. Alt. ProbUI: Generalising Touch Target Representations to Enable Declarative Gesture Definition for Probabilistic GUIs. In Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems (CHI ’17), Association for Computing Machinery, New York, NY, USA, 2017, p. 4640–4653. doi:10.1145/3025453.3025502
[BibTeX] [Abstract] [PDF]
We present ProbUI, a mobile touch GUI framework that merges ease of use of declarative gesture definition with the benefits of probabilistic reasoning. It helps developers to handle uncertain input and implement feedback and GUI adaptations. ProbUI replaces today’s static target models (bounding boxes) with probabilistic gestures (“bounding behaviours”). It is the first touch GUI framework to unite concepts from three areas of related work: 1) Developers declaratively define touch behaviours for GUI targets. As a key insight, the declarations imply simple probabilistic models (HMMs with 2D Gaussian emissions). 2) ProbUI derives these models automatically to evaluate users’ touch sequences. 3) It then infers intended behaviour and target. Developers bind callbacks to gesture progress, completion, and other conditions. We show ProbUI’s value by implementing existing and novel widgets, and report developer feedback from a survey and a lab study.
@InProceedings{buschek2017chi,
author = {Buschek, Daniel and Alt, Florian},
booktitle = {{Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems}},
title = {{ProbUI: Generalising Touch Target Representations to Enable Declarative Gesture Definition for Probabilistic GUIs}},
year = {2017},
address = {New York, NY, USA},
note = {buschek2017chi},
pages = {4640--4653},
publisher = {Association for Computing Machinery},
series = {CHI '17},
abstract = {We present ProbUI, a mobile touch GUI framework that merges ease of use of declarative gesture definition with the benefits of probabilistic reasoning. It helps developers to handle uncertain input and implement feedback and GUI adaptations. ProbUI replaces today's static target models (bounding boxes) with probabilistic gestures ("bounding behaviours"). It is the first touch GUI framework to unite concepts from three areas of related work: 1) Developers declaratively define touch behaviours for GUI targets. As a key insight, the declarations imply simple probabilistic models (HMMs with 2D Gaussian emissions). 2) ProbUI derives these models automatically to evaluate users' touch sequences. 3) It then infers intended behaviour and target. Developers bind callbacks to gesture progress, completion, and other conditions. We show ProbUI's value by implementing existing and novel widgets, and report developer feedback from a survey and a lab study.},
acmid = {3025502},
doi = {10.1145/3025453.3025502},
isbn = {978-1-4503-4655-9},
keywords = {gui framework, probabilistic modelling, touch gestures},
location = {Denver, Colorado, USA},
numpages = {14},
timestamp = {2017.05.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2017chi.pdf},
}
M. Al-Sada, M. Khamis, A. Kato, S. Sugano, T. Nakajima, and F. Alt. Challenges and Opportunities of Supernumerary Robotic Limbs. In CHI 2017 Workshop on Amplification and Augmentation of Human Perception, 2017.
[BibTeX] [PDF]
@InProceedings{alsada2017chiws,
author = {Al-Sada, Mohammed and Khamis, Mohamed and Kato, Akira and Sugano, Shigeki and Nakajima, Tatsuo and Alt, Florian},
booktitle = {{CHI 2017 Workshop on Amplification and Augmentation of Human Perception}},
title = {{Challenges and Opportunities of Supernumerary Robotic Limbs}},
year = {2017},
note = {alsada2017chiws},
timestamp = {2017.05.09},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alsada2017chiws.pdf},
}
M. Khamis, R. Hasholzner, A. Bulling, and F. Alt. GTmoPass: Two-factor Authentication on Public Displays Using GazeTouch passwords and Personal Mobile Devices. In Proceedings of the 2017 ACM International Symposium on Pervasive Displays (PerDis ’17), Association for Computing Machinery, New York, NY, USA, 2017. doi:10.1145/3078810.3078815
[BibTeX] [Abstract] [PDF]
As public displays continue to deliver increasingly private and personalized content, there is a need to ensure that only legitimate users can access private information in sensitive contexts. While public displays can adopt authentication concepts similar to those used on public terminals (e.g., ATMs), authentication in public is subject to a number of risks. Namely, adversaries can uncover a user’s password through (1) shoulder surfing, (2) thermal attacks, or (3) smudge attacks. To address this problem we propose GTmoPass, an authentication architecture that enables multi-factor user authentication on public displays. The first factor is a knowledge factor: we employ a shoulder-surfing-resilient multimodal scheme that combines gaze and touch input for password entry. The second factor is a possession factor: users utilize their personal mobile devices, on which they enter the password. Credentials are securely transmitted to a server via Bluetooth beacons. We describe the implementation of GTmoPass and report on an evaluation of its usability and security, which shows that although authentication using GTmoPass is slightly slower than traditional methods, it protects against the three aforementioned threats.
@InProceedings{khamis2017perdis,
author = {Khamis, Mohamed and Hasholzner, Regina and Bulling, Andreas and Alt, Florian},
booktitle = {{Proceedings of the 2017 ACM International Symposium on Pervasive Displays}},
title = {{GTmoPass: Two-factor Authentication on Public Displays Using GazeTouch passwords and Personal Mobile Devices}},
year = {2017},
address = {New York, NY, USA},
note = {khamis2017perdis},
publisher = {Association for Computing Machinery},
series = {PerDis '17},
abstract = {As public displays continue to deliver increasingly private and personalized content, there is a need to ensure that only legitimate users can access private information in sensitive contexts. While public displays can adopt authentication concepts similar to those used on public terminals (e.g., ATMs), authentication in public is subject to a number of risks. Namely, adversaries can uncover a user's password through (1) shoulder surfing, (2) thermal attacks, or (3) smudge attacks. To address this problem we propose GTmoPass, an authentication architecture that enables multi-factor user authentication on public displays. The first factor is a knowledge factor: we employ a shoulder-surfing-resilient multimodal scheme that combines gaze and touch input for password entry. The second factor is a possession factor: users utilize their personal mobile devices, on which they enter the password. Credentials are securely transmitted to a server via Bluetooth beacons. We describe the implementation of GTmoPass and report on an evaluation of its usability and security, which shows that although authentication using GTmoPass is slightly slower than traditional methods, it protects against the three aforementioned threats.},
acmid = {3078815},
doi = {10.1145/3078810.3078815},
isbn = {978-1-4503-5045-7/17/06},
location = {Lugano, Switzerland},
numpages = {9},
owner = {florian},
timestamp = {2017.05.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2017perdis.pdf},
}
S. Oberhuber, T. Kothe, S. Schneegass, and F. Alt. Augmented Games: Exploring Design Opportunities in AR Settings With Children. In Proceedings of the 2017 Conference on Interaction Design and Children (IDC ’17), Association for Computing Machinery, New York, NY, USA, 2017, p. 371–377. doi:10.1145/3078072.3079734
[BibTeX] [Abstract] [PDF]
In this paper we investigate how Augmented Reality (AR) technology influences children during creative content generation in playful settings. The work is motivated by the recent spread of AR and the fact that children get in touch with this technology through their smart phones very early on. To understand the consequences, we implemented an app for smart mobile devices that allows children to create treasure hunts using GPS coordinates and marker-based AR functionality. During a qualitative user study, we asked students (n=27) to create traditional (paper + art supplies) and digital (paper + art supplies + AR app) treasure hunts and compared the resulting games, among other metrics, in terms of complexity, length and types of media used. Whereas traditional treasure hunts were linear, centered around locations and delivered information with text only, digital treasure hunts were more complex, focused on visual aspects and frequently integrated storytelling.
@InProceedings{oberhuber2017idc,
author = {Oberhuber, Sascha and Kothe, Tina and Schneegass, Stefan and Alt, Florian},
booktitle = {{Proceedings of the 2017 Conference on Interaction Design and Children}},
title = {{Augmented Games: Exploring Design Opportunities in AR Settings With Children}},
year = {2017},
address = {New York, NY, USA},
note = {oberhuber2017idc},
pages = {371--377},
publisher = {Association for Computing Machinery},
series = {IDC '17},
abstract = {In this paper we investigate how Augmented Reality (AR) technology influences children during creative content generation in playful settings. The work is motivated by the recent spread of AR and the fact that children get in touch with this technology through their smart phones very early on. To understand the consequences, we implemented an app for smart mobile devices that allows children to create treasure hunts using GPS coordinates and marker-based AR functionality. During a qualitative user study, we asked students (n=27) to create traditional (paper + art supplies) and digital (paper + art supplies + AR app) treasure hunts and compared the resulting games, among other metrics, in terms of complexity, length and types of media used. Whereas traditional treasure hunts were linear, centered around locations and delivered information with text only, digital treasure hunts were more complex, focused on visual aspects and frequently integrated storytelling.},
acmid = {3079734},
doi = {10.1145/3078072.3079734},
isbn = {978-1-4503-4921-5},
keywords = {AR, children, creativity, education, storytelling},
location = {Stanford, California, USA},
numpages = {7},
timestamp = {2017.05.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/oberhuber2017idc.pdf},
}
R. Linke, T. Kothe, and F. Alt. TaBooGa: A Hybrid Learning App to Support Children’s Reading Motivation. In Proceedings of the 2017 Conference on Interaction Design and Children (IDC ’17), Association for Computing Machinery, New York, NY, USA, 2017, p. 278–285. doi:10.1145/3078072.3079712
[BibTeX] [Abstract] [PDF]
In this paper we present TaBooGa (Tangible Book Game), a hybrid learning application we developed to increase children’s reading motivation. As children are exposed to digital devices early on (e.g., smart phones and tablets), weak readers are particularly apt to prefer digital offers over reading traditional books. Prior work has shown that ebooks can partially address this challenge by making reading more compelling for children. In this work we show that augmenting ebooks with tangible elements can further increase reading motivation. In particular, we embed tangible elements both for navigating through the book and in the form of mini-games that interlace the reading task. We report on the results of an evaluation among 22 primary school pupils, comparing the influence of the approach on both strong and weak readers. Our results show a positive influence beyond reading motivation on both weak and strong readers. Yet, the approach requires striking a balance between the tangible elements being motivating and not being too distracting.
@InProceedings{linke2017idc,
author = {Linke, Rebecca and Kothe, Tina and Alt, Florian},
booktitle = {{Proceedings of the 2017 Conference on Interaction Design and Children}},
title = {{TaBooGa: A Hybrid Learning App to Support Children's Reading Motivation}},
year = {2017},
address = {New York, NY, USA},
note = {linke2017idc},
pages = {278--285},
publisher = {Association for Computing Machinery},
series = {IDC '17},
abstract = {In this paper we present TaBooGa (Tangible Book Game), a hybrid learning application we developed to increase children's reading motivation. As children are exposed to digital devices early on (e.g., smart phones and tablets), weak readers are particularly apt to prefer digital offers over reading traditional books. Prior work has shown that ebooks can partially address this challenge by making reading more compelling for children. In this work we show that augmenting ebooks with tangible elements can further increase reading motivation. In particular, we embed tangible elements both for navigating through the book and in the form of mini-games that interlace the reading task. We report on the results of an evaluation among 22 primary school pupils, comparing the influence of the approach on both strong and weak readers. Our results show a positive influence beyond reading motivation on both weak and strong readers. Yet, the approach requires striking a balance between the tangible elements being motivating and not being too distracting.},
acmid = {3079712},
doi = {10.1145/3078072.3079712},
isbn = {978-1-4503-4921-5},
keywords = {book-app, hybrid, literature, motivation, reading, tangible},
location = {Stanford, California, USA},
numpages = {8},
timestamp = {2017.05.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/linke2017idc.pdf},
}
O. Duerr, M. Khamis, D. Buschek, and F. Alt. HelpMe: Assisting Older Adults in Performing Tasks on Mobile Devices. In Proceedings of the CHI 2017 Workshop on Designing Mobile Interactions for the Ageing Populations, New York, NY, USA, 2017.
[BibTeX] [Abstract] [PDF]
Although mobile devices are becoming more ubiquitous, older adults have trouble catching up with the dynamics of technological innovation in smartphones. Most custom solutions for them rely on a proprietary UI with an extenuated number of interaction possibilities. While these solutions do help with basic tasks such as calling the right person, many of the benefits of having a smartphone are clearly dislodged. We introduce and evaluate a prototype, HelpMe, for older adult users who want to use more demanding apps without external assistance. Through a pre-study we uncovered a set of behaviors that imply that the user needs assistance. By detecting these behaviors or by manual request, HelpMe overlays information that explains to the user what can be done on the current screen and what the different UI symbols resemble. We evaluated HelpMe in a subsequent study where we collected feedback and measured the workload. Our findings show that older adult users would benefit from HelpMe, and that it reduces the perceived workload.
@InProceedings{duerr2017olderadults,
author = {Duerr, Oliver and Khamis, Mohamed and Buschek, Daniel and Alt, Florian},
booktitle = {{Proceedings of the CHI 2017 Workshop on Designing Mobile Interactions for the Ageing Populations}},
title = {{HelpMe: Assisting Older Adults in Performing Tasks on Mobile Devices}},
year = {2017},
address = {New York, NY, USA},
note = {duerr2017olderadults},
abstract = {Although mobile devices are becoming more ubiquitous, older adults have trouble catching up with the dynamics of technological innovation in smartphones. Most custom solutions for them rely on a proprietary UI with an extenuated number of interaction possibilities. While these solutions do help with basic tasks such as calling the right person, many of the benefits of having a smartphone are clearly dislodged. We introduce and evaluate a prototype, HelpMe, for older adult users who want to use more demanding apps without external assistance. Through a pre-study we uncovered a set of behaviors that imply that the user needs assistance. By detecting these behaviors or by manual request, HelpMe overlays information that explains to the user what can be done on the current screen and what the different UI symbols resemble. We evaluated HelpMe in a subsequent study where we collected feedback and measured the workload. Our findings show that older adult users would benefit from HelpMe, and that it reduces the perceived workload.},
location = {Denver, CO, USA},
timestamp = {2017.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/duerr2017olderadults.pdf},
}
M. Al Sada, M. Khamis, A. Kato, S. Sugano, T. Nakajima, and F. Alt. Challenges and Opportunities of Supernumerary Robotic Limbs. In Proceedings of the CHI 2017 Workshop on Amplification and Augmentation of Human Perception (Amplify’17), New York, NY, USA, 2017.
[BibTeX] [Abstract] [PDF]
Recent advancements in robotics and wearables made it possible to augment humans with additional robotic limbs (e.g., extra pair of arms). However, these advances have been dispersed among different research communities with very little attention to the user’s perspective. In this work we take a first step to close this gap. We report on the results of two focus groups that uncovered expectations and concerns of potential users of Supernumerary Robotic Limbs (SRLs). There is a wide range of applications for SRLs within daily usage contexts, like enabling new perceptions, commuting and communication methods as well as enhancing existing ones. Yet, several requirements need to be met before SRLs can be widely adopted, such as multipurpose design and adequate sensory feedback. We discuss how these findings influence the design of future SRLs.
@InProceedings{alsada2017amplify,
author = {Al Sada, Mohammed and Khamis, Mohamed and Kato, Akira and Sugano, Shigeki and Nakajima, Tatsuo and Alt, Florian},
booktitle = {{Proceedings of the CHI 2017 Workshop on Amplification and Augmentation of Human Perception}},
title = {{Challenges and Opportunities of Supernumerary Robotic Limbs}},
year = {2017},
address = {New York, NY, USA},
note = {alsada2017amplify},
series = {Amplify'17},
abstract = {Recent advancements in robotics and wearables made it possible to augment humans with additional robotic limbs (e.g., extra pair of arms). However, these advances have been dispersed among different research communities with very little attention to the user’s perspective. In this work we take a first step to close this gap. We report on the results of two focus groups that uncovered expectations and concerns of potential users of Supernumerary Robotic Limbs (SRLs). There is a wide range of applications for SRLs within daily usage contexts, like enabling new perceptions, commuting and communication methods as well as enhancing existing ones. Yet, several requirements need to be met before SRLs can be widely adopted, such as multipurpose design and adequate sensory feedback. We discuss how these findings influence the design of future SRLs.},
location = {Denver, CO, USA},
timestamp = {2017.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alsada2017amplify.pdf},
}
C. George, M. Khamis, M. Burger, H. Schmidt, F. Alt, and H. Hussmann. Seamless and Secure VR: Adapting and Evaluating Established Authentication Systems for Virtual Reality. In Proceedings of the Usable Security Mini Conference 2017, Internet Society, San Diego, CA, USA, 2017. doi:10.14722/usec.2017.23028
[BibTeX] [Abstract] [PDF]
Virtual reality (VR) headsets are enabling a wide range of new opportunities for the user. For example, in the near future users may be able to visit virtual shopping malls and virtually join international conferences. These and many other scenarios pose new questions with regards to privacy and security, in particular authentication of users within the virtual environment. As a first step towards seamless VR authentication, this paper investigates the direct transfer of well-established concepts (PIN, Android unlock patterns) into VR. In a pilot study (N = 5) and a lab study (N = 25), we adapted existing mechanisms and evaluated their usability and security for VR. The results indicate that both PINs and patterns are well suited for authentication in VR. We found that the usability of both methods matched the performance known from the physical world. In addition, the private visual channel makes authentication harder to observe, indicating that authentication in VR using traditional concepts already achieves a good balance in the trade-off between usability and security. The paper contributes to a better understanding of authentication within VR environments, by providing the first investigation of established authentication methods within VR, and presents the base layer for the design of future authentication schemes, which are used in VR environments only.
@InProceedings{george2017usec,
author = {Ceenu George AND Mohamed Khamis AND Marinus Burger AND Henri Schmidt AND Florian Alt AND Heinrich Hussmann},
booktitle = {{Proceedings of the Usable Security Mini Conference 2017}},
title = {{Seamless and Secure VR: Adapting and Evaluating Established Authentication Systems for Virtual Reality}},
year = {2017},
address = {San Diego, CA, USA},
note = {george2017usec},
publisher = {Internet Society},
abstract = {Virtual reality (VR) headsets are enabling a wide range of new opportunities for the user. For example, in the near future users may be able to visit virtual shopping malls and virtually join international conferences. These and many other scenarios pose new questions with regards to privacy and security, in particular authentication of users within the virtual environment. As a first step towards seamless VR authentication, this paper investigates the direct transfer of well-established concepts (PIN, Android unlock patterns) into VR. In a pilot study (N = 5) and a lab study (N = 25), we adapted existing mechanisms and evaluated their usability and security for VR. The results indicate that both PINs and patterns are well suited for authentication in VR. We found that the usability of both methods matched the performance known from the physical world. In addition, the private visual channel makes authentication harder to observe, indicating that authentication in VR using traditional concepts already achieves a good balance in the trade-off between usability and security. The paper contributes to a better understanding of authentication within VR environments, by providing the first investigation of established authentication methods within VR, and presents the base layer for the design of future authentication schemes, which are used in VR environments only.},
doi = {10.14722/usec.2017.23028},
owner = {florian},
timestamp = {2017.02.26},
url = {http://www.florian-alt.org/unibw/wp-content/publications/george2017usec.pdf},
}

2016

M. Khamis, L. Trotter, M. Tessmann, C. Dannhart, A. Bulling, and F. Alt. EyeVote in the Wild: Do Users Bother Correcting System Errors on Public Displays?. In Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia (MUM ’16), Association for Computing Machinery, New York, NY, USA, 2016, p. 57–62. doi:10.1145/3012709.3012743
[BibTeX] [Abstract] [PDF]
Although recovering from errors is straightforward on most interfaces, public display systems pose unique design challenges. Namely, public display users interact for very short amounts of time and are believed to abandon the display when interrupted or forced to deviate from the main task. To date, it is not well understood whether public display designers should enable users to correct errors (e.g. by asking users to confirm or giving them a chance to correct their input), or aim for faster interaction and rely on other types of feedback to estimate errors. To close this gap, we conducted a field study where we investigated the users’ willingness to correct their input on public displays. We report on our findings from an in-the-wild deployment of a public gaze-based voting system where we intentionally evoked system errors to see if users correct them. We found that public display users are willing to correct system errors provided that the correction is fast and straightforward. We discuss how our findings influence the choice of interaction methods for public displays; interaction methods that are highly usable but suffer from low accuracy can still be effective if users can “undo” their interactions.
@InProceedings{khamis2016mum,
author = {Khamis, Mohamed and Trotter, Ludwig and Tessmann, Markus and Dannhart, Christina and Bulling, Andreas and Alt, Florian},
booktitle = {{Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{EyeVote in the Wild: Do Users Bother Correcting System Errors on Public Displays?}},
year = {2016},
address = {New York, NY, USA},
note = {khamis2016mum},
pages = {57--62},
publisher = {Association for Computing Machinery},
series = {MUM '16},
abstract = {Although recovering from errors is straightforward on most interfaces, public display systems pose unique design challenges. Namely, public display users interact for very short amounts of time and are believed to abandon the display when interrupted or forced to deviate from the main task. To date, it is not well understood whether public display designers should enable users to correct errors (e.g. by asking users to confirm or giving them a chance to correct their input), or aim for faster interaction and rely on other types of feedback to estimate errors. To close this gap, we conducted a field study where we investigated the users' willingness to correct their input on public displays. We report on our findings from an in-the-wild deployment of a public gaze-based voting system where we intentionally evoked system errors to see if users correct them. We found that public display users are willing to correct system errors provided that the correction is fast and straightforward. We discuss how our findings influence the choice of interaction methods for public displays; interaction methods that are highly usable but suffer from low accuracy can still be effective if users can "undo" their interactions.},
acmid = {3012743},
doi = {10.1145/3012709.3012743},
isbn = {978-1-4503-4860-7},
keywords = {gaze interaction, public displays, smooth pursuit, voting},
location = {Rovaniemi, Finland},
numpages = {6},
timestamp = {2016.12.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2016mum.pdf},
}
F. Alt, M. Mikusz, S. Schneegass, and A. Bulling. Memorability of Cued-recall Graphical Passwords with Saliency Masks. In Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia (MUM ’16), Association for Computing Machinery, New York, NY, USA, 2016, p. 191–200. doi:10.1145/3012709.3012730
[BibTeX] [Abstract] [PDF]
Cued-recall graphical passwords have a lot of potential for secure user authentication, particularly if combined with saliency masks to prevent users from selecting weak passwords. Saliency masks were shown to significantly improve password security by excluding those areas of the image that are most likely to lead to hotspots. In this paper we investigate the impact of such saliency masks on the memorability of cued-recall graphical passwords. We first conduct two pre-studies (N=52) to obtain a set of images with three different image complexities as well as real passwords. A month-long user study (N=26) revealed that there is a strong learning effect for graphical passwords, in particular if defined on images with a saliency mask. While for complex images, the learning curve is steeper than for less complex ones, they best supported memorability in the long term, most likely because they provided users more alternatives to select memorable password points. These results complement prior work on the security of such passwords and underline the potential of saliency masks as both a secure and usable improvement to cued-recall gaze-based graphical passwords.
@InProceedings{alt2016mum,
author = {Alt, Florian and Mikusz, Mateusz and Schneegass, Stefan and Bulling, Andreas},
booktitle = {{Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{Memorability of Cued-recall Graphical Passwords with Saliency Masks}},
year = {2016},
address = {New York, NY, USA},
note = {alt2016mum},
pages = {191--200},
publisher = {Association for Computing Machinery},
series = {MUM '16},
abstract = {Cued-recall graphical passwords have a lot of potential for secure user authentication, particularly if combined with saliency masks to prevent users from selecting weak passwords. Saliency masks were shown to significantly improve password security by excluding those areas of the image that are most likely to lead to hotspots. In this paper we investigate the impact of such saliency masks on the memorability of cued-recall graphical passwords. We first conduct two pre-studies (N=52) to obtain a set of images with three different image complexities as well as real passwords. A month-long user study (N=26) revealed that there is a strong learning effect for graphical passwords, in particular if defined on images with a saliency mask. While for complex images, the learning curve is steeper than for less complex ones, they best supported memorability in the long term, most likely because they provided users more alternatives to select memorable password points. These results complement prior work on the security of such passwords and underline the potential of saliency masks as both a secure and usable improvement to cued-recall gaze-based graphical passwords.},
acmid = {3012730},
doi = {10.1145/3012709.3012730},
isbn = {978-1-4503-4860-7},
keywords = {cued-recall graphical passwords, memorability, saliency masks, user authentication, user study},
location = {Rovaniemi, Finland},
numpages = {10},
timestamp = {2016.12.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2016mum.pdf},
}
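Aside: the saliency-mask idea in the entry above (excluding the image regions most likely to become hotspots from password selection) reduces to a threshold over a per-pixel saliency map. A minimal sketch, assuming a saliency map from any off-the-shelf model; the blocked fraction is an invented parameter, not a value from the paper.

import numpy as np

def saliency_mask(saliency: np.ndarray, blocked_fraction: float = 0.3) -> np.ndarray:
    # True where password points may still be placed; the most salient
    # blocked_fraction of pixels (likely hotspots) is excluded.
    cutoff = np.quantile(saliency, 1.0 - blocked_fraction)
    return saliency < cutoff

# Usage on a toy 2x2 map: only the single most salient cell gets blocked.
print(saliency_mask(np.array([[0.9, 0.2], [0.1, 0.3]]), blocked_fraction=0.25))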
N. Broy, V. Lindner, and F. Alt. The S3D-UI Designer: Creating User Interface Prototypes for 3D Displays. In Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia (MUM ’16), Association for Computing Machinery, New York, NY, USA, 2016, p. 49–55. doi:10.1145/3012709.3012727
[BibTeX] [Abstract] [PDF]
In this paper, we present the S3D-UI Designer, a tool to create prototypes for 3D displays. Stereoscopic (S3D) displays are quickly becoming popular beyond cinemas and home entertainment. S3D displays can already be found today in mobile phones, public displays, and car dashboards. A benefit of such UIs is that they can group and structure information in a way that makes them easily perceivable for the user. At the same time, prototyping these UIs is challenging, as with traditional techniques, UI elements cannot easily be rendered and positioned in 3D space. In contrast to professional 3D authoring tools, we present a tool targeted towards non-experts to quickly and easily sketch an S3D UI and instantly render it on a 3D display. We report on the design of the tool by means of a workshop and present an evaluation study with 26 participants assessing its usability.
@InProceedings{broy2016mum,
author = {Broy, Nora and Lindner, Verena and Alt, Florian},
booktitle = {{Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{The S3D-UI Designer: Creating User Interface Prototypes for 3D Displays}},
year = {2016},
address = {New York, NY, USA},
note = {broy2016mum},
pages = {49--55},
publisher = {Association for Computing Machinery},
series = {MUM '16},
abstract = {In this paper, we present the S3D-UI Designer --- a tool to create prototypes for 3D displays. Stereoscopic (S3D) displays are quickly becoming popular beyond cinemas and home entertainment. S3D displays can already be found today in mobile phones, public displays, and car dashboards. A benefit of such UIs is that they can group and structure information in a way that makes them easily perceivable for the user. At the same time, prototyping these UIs is challenging, as with traditional techniques, UI elements cannot easily be rendered and positioned in 3D space. In contrast to professional 3D authoring tools, we present a tool targeted towards non-experts to quickly and easily sketch an S3D UI and instantly render it on a 3D display. We report on the design of the tool by means of a workshop and present an evaluation study with 26 participants assessing its usability.},
acmid = {3012727},
doi = {10.1145/3012709.3012727},
isbn = {978-1-4503-4860-7},
keywords = {prototyping, stereoscopic 3D, user interfaces},
location = {Rovaniemi, Finland},
numpages = {7},
timestamp = {2016.12.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2016mum.pdf},
}
E. von Zezschwitz, M. Eiband, D. Buschek, S. Oberhuber, A. De Luca, F. Alt, and H. Hussmann. On Quantifying the Effective Password Space of Grid-based Unlock Gestures. In Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia (MUM ’16), Association for Computing Machinery, New York, NY, USA, 2016, p. 201–212. doi:10.1145/3012709.3012729
[BibTeX] [Abstract] [PDF]
We present a similarity metric for Android unlock patterns to quantify the effective password space of user-defined gestures. Our metric is the first of its kind to reflect that users choose patterns based on human intuition and interest in geometric properties of the resulting shapes. Applying our metric to a dataset of 506 user-defined patterns reveals very similar shapes that only differ by simple geometric transformations such as rotation. This shrinks the effective password space by 66% and allows informed guessing attacks. Consequently, we present an approach to subtly nudge users to create more diverse patterns by showing background images and animations during pattern creation. Results from a user study (n = 496) show that applying such countermeasures can significantly increase pattern diversity. We conclude with implications for pattern choices and the design of enrollment processes.
@InProceedings{zezschwitz2016mum,
author = {von Zezschwitz, Emanuel and Eiband, Malin and Buschek, Daniel and Oberhuber, Sascha and De Luca, Alexander and Alt, Florian and Hussmann, Heinrich},
booktitle = {{Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{On Quantifying the Effective Password Space of Grid-based Unlock Gestures}},
year = {2016},
address = {New York, NY, USA},
note = {zezschwitz2016mum},
pages = {201--212},
publisher = {Association for Computing Machinery},
series = {MUM '16},
abstract = {We present a similarity metric for Android unlock patterns to quantify the effective password space of user-defined gestures. Our metric is the first of its kind to reflect that users choose patterns based on human intuition and interest in geometric properties of the resulting shapes. Applying our metric to a dataset of 506 user-defined patterns reveals very similar shapes that only differ by simple geometric transformations such as rotation. This shrinks the effective password space by 66% and allows informed guessing attacks. Consequently, we present an approach to subtly nudge users to create more diverse patterns by showing background images and animations during pattern creation. Results from a user study (n = 496) show that applying such countermeasures can significantly increase pattern diversity. We conclude with implications for pattern choices and the design of enrollment processes.},
acmid = {3012729},
doi = {10.1145/3012709.3012729},
isbn = {978-1-4503-4860-7},
keywords = {metric, password space, security, similarity, unlock pattern, user selection},
location = {Rovaniemi, Finland},
numpages = {12},
timestamp = {2016.12.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/zezschwitz2016mum.pdf},
}
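Aside: the finding above, that user-chosen patterns often differ only by simple geometric transformations, can be illustrated by canonicalizing each pattern over the eight symmetries of the 3x3 grid. This toy equivalence is a sketch of why rotated or mirrored variants shrink the effective password space, not the paper's actual similarity metric.

def rotate(p):
    # Rotate a (row, col) cell 90 degrees clockwise on a 3x3 grid.
    r, c = p
    return (c, 2 - r)

def mirror(p):
    # Mirror a cell horizontally.
    r, c = p
    return (r, 2 - c)

def transformations(pattern):
    # All eight dihedral variants: four rotations, each optionally mirrored.
    variants = [tuple(pattern)]
    for _ in range(3):
        variants.append(tuple(rotate(p) for p in variants[-1]))
    variants += [tuple(mirror(p) for p in v) for v in variants]
    return set(variants)

def canonical(pattern):
    # Deterministic representative of the pattern's equivalence class.
    return min(transformations(pattern))

# An L-shape and its 90-degree rotation collapse to a single class.
a = [(0, 0), (1, 0), (2, 0), (2, 1)]
b = [(0, 2), (0, 1), (0, 0), (1, 0)]
print(canonical(a) == canonical(b))  # True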
Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia. New York, NY, USA: Association for Computing Machinery, 2016.
[BibTeX] [PDF]
@Proceedings{alt2016mumproc,
title = {{Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia}},
year = {2016},
address = {New York, NY, USA},
isbn = {978-1-4503-4860-7},
note = {alt2016mumproc},
publisher = {Association for Computing Machinery},
series = {MUM '16},
location = {Rovaniemi, Finland},
timestamp = {2016.12.14},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2016mumproc.pdf},
}
F. Steinberger, P. Proppe, R. Schroeter, and F. Alt. CoastMaster: An Ambient Speedometer to Gamify Safe Driving. In Proceedings of the 8th International Conference on Automotive User Interfaces and Interactive Vehicular Applications (Automotive’UI 16), Association for Computing Machinery, New York, NY, USA, 2016, p. 83–90. doi:10.1145/3003715.3005412
[BibTeX] [Abstract] [PDF]
We present CoastMaster, a smartphone application that serves as an ambient speedometer and driving game display. Our work is motivated by the need to re-engage drivers in the driving task, e.g., in situations where manoeuvring the vehicle is straightforward and does not require high levels of engagement. CoastMaster supports drivers during speed limit changes by (a) re-engaging them in the driving task and (b) providing feedback on driving behaviour. In a simulator study (N=24), we compare a gamified and a non-gamified interface with regards to user experience, driving performance, and visual distraction. Our results indicate an increase in hedonic quality and driver engagement as well as a decrease in speed violations through the gamified condition. At the same time, the gamified version leads to longer glances towards the display, suggesting visual distraction. Our study findings inform specific design recommendations for ambient interfaces and gamified driving.
@InProceedings{steinberger2016autoui,
author = {Steinberger, Fabius and Proppe, Patrick and Schroeter, Ronald and Alt, Florian},
booktitle = {{Proceedings of the 8th International Conference on Automotive User Interfaces and Interactive Vehicular Applications}},
title = {{CoastMaster: An Ambient Speedometer to Gamify Safe Driving}},
year = {2016},
address = {New York, NY, USA},
note = {steinberger2016autoui},
pages = {83--90},
publisher = {Association for Computing Machinery},
series = {Automotive'UI 16},
abstract = {We present CoastMaster, a smartphone application that serves as an ambient speedometer and driving game display. Our work is motivated by the need to re-engage drivers in the driving task, e.g., in situations where manoeuvring the vehicle is straightforward and does not require high levels of engagement. CoastMaster supports drivers during speed limit changes by (a) re-engaging them in the driving task and (b) providing feedback on driving behaviour. In a simulator study (N=24), we compare a gamified and a non-gamified interface with regards to user experience, driving performance, and visual distraction. Our results indicate an increase in hedonic quality and driver engagement as well as a decrease in speed violations through the gamified condition. At the same time, the gamified version leads to longer glances towards the display, suggesting visual distraction. Our study findings inform specific design recommendations for ambient interfaces and gamified driving.},
acmid = {3005412},
doi = {10.1145/3003715.3005412},
isbn = {978-1-4503-4533-0},
keywords = {Ambient interface, design approach, distraction, gamification, interactive experience, vehicle-based apps},
location = {Ann Arbor, MI, USA},
numpages = {8},
timestamp = {2016.10.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/steinberger2016autoui.pdf},
}
M. Khamis, A. Klimczak, M. Reiss, F. Alt, and A. Bulling. EyeScout: Active Eye Tracking for Position and Movement-Independent Gaze Interaction with Large Public Displays. In Proceedings of the 30th Annual ACM Symposium on User Interface Software & Technology (UIST ’17), Association for Computing Machinery, New York, NY, USA, 2016. doi:10.1145/3126594.3126630
[BibTeX] [Abstract] [PDF]
While gaze holds a lot of promise for hands-free interaction with public displays, remote eye trackers with their confined tracking box restrict users to a single stationary position in front of the display. We present EyeScout, an active eye tracking system that combines an eye tracker mounted on a rail system with a computational method to automatically detect and align the tracker with the user’s lateral movement. EyeScout addresses key limitations of current gaze-enabled large public displays by offering two novel gaze-interaction modes for a single user: In “Walk then Interact” the user can walk up to an arbitrary position in front of the display and interact, while in “Walk and Interact” the user can interact even while on the move. We report on a user study that shows that EyeScout is well perceived by users, extends a public display’s sweet spot into a sweet line, and reduces gaze interaction kick-off time to 3.5 seconds – a 62% improvement over state-of-the-art solutions. We discuss sample applications that demonstrate how EyeScout can enable position and movement-independent gaze interaction with large public displays.
@InProceedings{khamis2017uist,
author = {Khamis, Mohamed and Klimczak, Alexander and Reiss, Martin and Alt, Florian and Bulling, Andreas},
booktitle = {{Proceedings of the 30th Annual ACM Symposium on User Interface Software \& Technology}},
title = {{EyeScout: Active Eye Tracking for Position and Movement-Independent Gaze Interaction with Large Public Displays}},
year = {2016},
address = {New York, NY, USA},
note = {khamis2017uist},
publisher = {Association for Computing Machinery},
series = {UIST '17},
abstract = {While gaze holds a lot of promise for hands-free interaction with public displays, remote eye trackers with their confined tracking box restrict users to a single stationary position in front of the display. We present EyeScout, an active eye tracking system that combines an eye tracker mounted on a rail system with a computational method to automatically detect and align the tracker with the user's lateral movement. EyeScout addresses key limitations of current gaze-enabled large public displays by offering two novel gaze-interaction modes for a single user: In "Walk then Interact" the user can walk up to an arbitrary position in front of the display and interact, while in "Walk and Interact" the user can interact even while on the move. We report on a user study that shows that EyeScout is well perceived by users, extends a public display's sweet spot into a sweet line, and reduces gaze interaction kick-off time to 3.5 seconds -- a 62% improvement over state-of-the-art solutions. We discuss sample applications that demonstrate how EyeScout can enable position and movement-independent gaze interaction with large public displays.},
acmid = {3126630},
doi = {10.1145/3126594.3126630},
isbn = {978-1-4503-4981-9/17/10},
location = {Quebec City, QC, Canada},
numpages = {12},
timestamp = {2016.10.05},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2017uist.pdf},
}
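Aside: the "active" part of the active eye tracking described above (keeping the tracker in front of a laterally moving user) can be pictured as a simple proportional controller over the rail position. The gain, units, and sensor/motor interfaces below are assumptions for illustration, not EyeScout's actual control code.

def carriage_velocity(user_x: float, carriage_x: float,
                      gain: float = 1.5, max_speed: float = 0.6) -> float:
    # Drive the rail carriage toward the user's lateral position
    # (both in metres); clamp to the motor's speed limit (m/s).
    error = user_x - carriage_x
    v = gain * error
    return max(-max_speed, min(max_speed, v))

# Usage: the user stands 0.8 m to the right of the carriage.
print(carriage_velocity(user_x=1.2, carriage_x=0.4))  # 0.6 (clamped)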
M. Khamis, O. Saltuk, A. Hang, K. Stolz, A. Bulling, and F. Alt. TextPursuits: Using Text for Pursuits-based Interaction and Calibration on Public Displays. In Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp ’16), Association for Computing Machinery, New York, NY, USA, 2016, p. 274–285. doi:10.1145/2971648.2971679
[BibTeX] [Abstract] [PDF]
In this paper we show how reading text on large displays can be used to enable gaze interaction in public space. Our research is motivated by the fact that much of the content on public displays includes text. Hence, researchers and practitioners could greatly benefit from users being able to spontaneously interact as well as to implicitly calibrate an eye tracker while simply reading this text. In particular, we adapt Pursuits, a technique that correlates users’ eye movements with moving on-screen targets. While prior work used abstract objects or dots as targets, we explore the use of Pursuits with text (read-and-pursue). Thereby we address the challenge that eye movements performed while reading interfere with the pursuit movements. Results from two user studies (N=37) show that Pursuits with text is feasible and can achieve similar accuracy as non text-based pursuit approaches. While calibration is less accurate, it integrates smoothly with reading and allows areas of the display the user is looking at to be identified.
@InProceedings{khamis2016ubicomp,
author = {Khamis, Mohamed and Saltuk, Ozan and Hang, Alina and Stolz, Katharina and Bulling, Andreas and Alt, Florian},
booktitle = {{Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing}},
title = {{TextPursuits: Using Text for Pursuits-based Interaction and Calibration on Public Displays}},
year = {2016},
address = {New York, NY, USA},
note = {khamis2016ubicomp},
pages = {274--285},
publisher = {Association for Computing Machinery},
series = {UbiComp '16},
abstract = {In this paper we show how reading text on large displays can be used to enable gaze interaction in public space. Our research is motivated by the fact that much of the content on public displays includes text. Hence, researchers and practitioners could greatly benefit from users being able to spontaneously interact as well as to implicitly calibrate an eye tracker while simply reading this text. In particular, we adapt Pursuits, a technique that correlates users' eye movements with moving on-screen targets. While prior work used abstract objects or dots as targets, we explore the use of Pursuits with text (read-and-pursue). Thereby we address the challenge that eye movements performed while reading interfere with the pursuit movements. Results from two user studies (N=37) show that Pursuits with text is feasible and can achieve similar accuracy as non text-based pursuit approaches. While calibration is less accurate, it integrates smoothly with reading and allows areas of the display the user is looking at to be identified.},
acmid = {2971679},
doi = {10.1145/2971648.2971679},
isbn = {978-1-4503-4461-6},
keywords = {gaze interaction, public displays, smooth pursuit, text},
location = {Heidelberg, Germany},
numpages = {12},
timestamp = {2016.09.18},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2016ubicomp.pdf},
}
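Aside: the Pursuits technique this paper adapts correlates the gaze trajectory with each moving target's trajectory and selects the target the eyes are following. A minimal sketch of that core idea follows; the window length and threshold are illustrative, and the paper's read-and-pursue adaptation for text is not shown.

from statistics import correlation  # Python 3.10+

def pursuit_match(gaze, targets, threshold=0.8):
    # gaze: list of (x, y) samples; targets: {name: list of (x, y)}
    # over the same time window. Trajectories are assumed to vary on
    # both axes (e.g. circular motion); otherwise Pearson's r is undefined.
    gx = [p[0] for p in gaze]
    gy = [p[1] for p in gaze]
    best_name, best_score = None, threshold
    for name, traj in targets.items():
        tx = [p[0] for p in traj]
        ty = [p[1] for p in traj]
        score = min(correlation(gx, tx), correlation(gy, ty))
        if score > best_score:  # followed only if both axes correlate
            best_name, best_score = name, score
    return best_name  # None if no target exceeds the threshold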
M. Khamis, F. Alt, and A. Bulling. Challenges and Design Space of Gaze-enabled Public Displays. In Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing (PETMEI ’16), Association for Computing Machinery, New York, NY, USA, 2016. doi:10.1145/2968219.2968342
[BibTeX] [Abstract] [PDF]
Gaze is an attractive modality for public displays, hence recent years have seen an increase in deployments of gaze-enabled public displays. Although gaze has been thoroughly investigated for desktop scenarios, gaze-enabled public displays present new challenges that are unique to this setup. In contrast to desktop settings, public displays (1) cannot afford requiring eye tracker calibration, (2) expect users to interact from different positions, and (3) expect multiple users to interact simultaneously. In this work we discuss these challenges, and explore the design space of gaze-enabled public displays. We conclude by discussing how the current state of research stands with respect to the identified challenges, and highlight directions for future work.
@InProceedings{khamis2016petmei,
author = {Khamis, Mohamed and Alt, Florian and Bulling, Andreas},
booktitle = {{Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing}},
title = {{Challenges and Design Space of Gaze-enabled Public Displays}},
year = {2016},
address = {New York, NY, USA},
note = {khamis2016petmei},
publisher = {Association for Computing Machinery},
series = {PETMEI '16},
abstract = {Gaze is an attractive modality for public displays, hence the recent years saw an increase in deployments of gaze-enabled public displays. Although gaze has been thoroughly investigated for desktop scenarios, gaze-enabled public displays present new challenges that are unique to this setup. In contrast to desktop settings, public displays (1) cannot afford requiring eye tracker calibration, (2) expect users to interact from different positions, and (3) expect multiple users to interact simultaneously. In this work we discuss these challenges, and explore the design space of gaze-enabled public displays. We conclude by discussing how the current state of research stands wrt.~the identified challenges, and highlight directions for future work.},
doi = {10.1145/2968219.2968342},
location = {Heidelberg, Germany},
numpages = {10},
timestamp = {2016.09.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2016petmei.pdf},
}
S. Schneegass, S. Ogando, and F. Alt. Using On-body Displays for Extending the Output of Wearable Devices. In Proceedings of the 2016 ACM International Symposium on Pervasive Displays (PerDis ’16), Association for Computing Machinery, New York, NY, USA, 2016, p. 67–74. doi:10.1145/2914920.2915021
[BibTeX] [Abstract] [PDF]
In this work, we explore wearable on-body displays. These displays have the potential of extending the display space of smart watches to the user’s body. Our research is motivated by wearable computing devices moving technology closer to the human. Today, smart watches offer functionalities similar to smart phones, yet at a smaller form factor. To cope with the limited display real estate, we propose to use on-body displays integrated with clothing to extend the available display space. We present a design space for on-body displays and explore users’ location and visualization preferences. We also report on the design and implementation of a prototypical display system. We evaluated the prototype in a lab study with 16 participants, showing that on-body displays perform similarly to current off-screen visualization techniques.
@InProceedings{schneegass2016perdis,
author = {Schneegass, Stefan and Ogando, Sophie and Alt, Florian},
booktitle = {{Proceedings of the 2016 ACM International Symposium on Pervasive Displays}},
title = {{Using On-body Displays for Extending the Output of Wearable Devices}},
year = {2016},
address = {New York, NY, USA},
note = {schneegass2016perdis},
pages = {67--74},
publisher = {Association for Computing Machinery},
series = {PerDis '16},
abstract = {In this work, we explore wearable on-body displays. These displays have the potential of extending the display space of smart watches to the user's body. Our research is motivated by wearable computing devices moving technology closer to the human. Today, smart watches offer functionalities similar to smart phones, yet at a smaller form factor. To cope with the limited display real estate, we propose to use on-body displays integrated with clothing to extend the available display space. We present a design space for on-body displays and explore users' location and visualization preferences. We also report on the design and implementation of a prototypical display system. We evaluated the prototype in a lab study with 16 participants, showing that on-body displays perform similarly to current off-screen visualization techniques.},
acmid = {2915021},
doi = {10.1145/2914920.2915021},
isbn = {978-1-4503-4366-4},
keywords = {focus + context, on-body display, smart textiles, wearable computing},
location = {Oulu, Finland},
numpages = {8},
timestamp = {2016.06.20},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schneegass2016perdis.pdf},
}
F. Alt and J. Vehns. Opportunistic Deployments: Challenges and Opportunities of Conducting Public Display Research at an Airport. In Proceedings of the 2016 ACM International Symposium on Pervasive Displays (PerDis ’16), Association for Computing Machinery, New York, NY, USA, 2016, p. 106–117. doi:10.1145/2914920.2915020
[BibTeX] [Abstract] [PDF]
In this paper, we report on the design, development, and deployment of an interactive shopping display at a major European airport. The ability to manufacture displays in arbitrary size and form factors as well as their networking capabilities allow public displays to be deployed in almost any location and target a huge variety of audiences. At the same time, this makes it difficult for researchers to gather generalizable insights on audience behavior. Rather, findings are often very specific to a particular deployment. We argue that in order to develop a comprehensive understanding of how successful interactive display installations can be created, researchers need to explore as large a variety of situations as possible. We contribute to this understanding by providing insights from a deployment in a security-critical environment involving multiple stakeholders, where the audience is encountered in different situations (waiting, passing by). Our insights are valuable for both researchers and practitioners operating interactive display deployments.
@InProceedings{alt2016perdis2,
author = {Alt, Florian and Vehns, Julia},
booktitle = {{Proceedings of the 2016 ACM International Symposium on Pervasive Displays}},
title = {{Opportunistic Deployments: Challenges and Opportunities of Conducting Public Display Research at an Airport}},
year = {2016},
address = {New York, NY, USA},
note = {alt2016perdis2},
pages = {106--117},
publisher = {Association for Computing Machinery},
series = {PerDis '16},
abstract = {In this paper, we report on the design, development, and deployment of an interactive shopping display at a major European airport. The ability to manufacture displays in arbitrary size and form factors as well as their networking capabilities allow public displays to be deployed in almost any location and target a huge variety of audiences. At the same time, this makes it difficult for researchers to gather generalizable insights on audience behavior. Rather, findings are often very specific to a particular deployment. We argue that in order to develop a comprehensive understanding of how successful interactive display installations can be created, researchers need to explore as large a variety of situations as possible. We contribute to this understanding by providing insights from a deployment in a security-critical environment involving multiple stakeholders, where the audience is encountered in different situations (waiting, passing by). Our insights are valuable for both researchers and practitioners operating interactive display deployments.},
acmid = {2915020},
doi = {10.1145/2914920.2915020},
isbn = {978-1-4503-4366-4},
keywords = {audience behavior, deployment-based research, interaction, public displays, shopping},
location = {Oulu, Finland},
numpages = {12},
timestamp = {2016.06.20},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2016perdis2.pdf},
}
M. Baldauf, F. Adegeye, F. Alt, and J. Harms. Your Browser is the Controller: Advanced Web-based Smartphone Remote Controls for Public Screens. In Proceedings of the 2016 ACM International Symposium on Pervasive Displays (PerDis ’16), Association for Computing Machinery, New York, NY, USA, 2016, p. 175–181. doi:10.1145/2914920.2915026
[BibTeX] [Abstract] [PDF]
In recent years, a lot of research has focused on using smartphones as input devices for distant screens, in many cases by means of native applications. At the same time, prior work often ignored the downsides of native applications for practical usage, such as the need for download and the required installation process. This hampers the spontaneous use of an interactive service. To address the aforementioned drawbacks, we introduce ATREUS, an open-source framework which enables creating and provisioning manifold mobile remote controls as plain web applications. We describe the basic architecture of ATREUS and present four functional remote controls realized using the framework. Two sophisticated controls, the Mini Video and the Smart Lens approach, have been previously implemented as native applications only. Furthermore, we report on lessons learned for realizing web-based remote controls during functional tests and finally present the results of an informal user study.
@InProceedings{baldauf2016perdis,
author = {Baldauf, Matthias and Adegeye, Florence and Alt, Florian and Harms, Johannes},
booktitle = {{Proceedings of the 2016 ACM International Symposium on Pervasive Displays}},
title = {{Your Browser is the Controller: Advanced Web-based Smartphone Remote Controls for Public Screens}},
year = {2016},
address = {New York, NY, USA},
note = {baldauf2016perdis},
pages = {175--181},
publisher = {Association for Computing Machinery},
series = {PerDis '16},
abstract = {In recent years, a lot of research has focused on using smartphones as input devices for distant screens, in many cases by means of native applications. At the same time, prior work often ignored the downsides of native applications for practical usage, such as the need for download and the required installation process. This hampers the spontaneous use of an interactive service. To address the aforementioned drawbacks, we introduce ATREUS, an open-source framework which enables creating and provisioning manifold mobile remote controls as plain web applications. We describe the basic architecture of ATREUS and present four functional remote controls realized using the framework. Two sophisticated controls, the Mini Video and the Smart Lens approach, have been previously implemented as native applications only. Furthermore, we report on lessons learned for realizing web-based remote controls during functional tests and finally present the results of an informal user study.},
acmid = {2915026},
doi = {10.1145/2914920.2915026},
isbn = {978-1-4503-4366-4},
keywords = {interaction, public display, remote control, smartphone},
location = {Oulu, Finland},
numpages = {7},
timestamp = {2016.06.20},
url = {http://www.florian-alt.org/unibw/wp-content/publications/baldauf2016perdis.pdf},
}
F. Alt, S. Torma, and D. Buschek. Don’t Disturb Me: Understanding Secondary Tasks on Public Displays. In Proceedings of the 2016 ACM International Symposium on Pervasive Displays (PerDis ’16), Association for Computing Machinery, New York, NY, USA, 2016, p. 1–12. doi:10.1145/2914920.2915023
[BibTeX] [Abstract] [PDF]
A growing number of displays provide information and applications in public spaces. Most applications today are considered to pose one task to the user, such as navigating a map. In contrast to such primary tasks, secondary tasks have so far received little attention in research, despite their practical relevance. For example, a secondary task might occur by displaying special ticket offers to a tourist browsing a city map for attractions. This paper investigates secondary tasks with two key contributions: First, we describe a design space for secondary tasks on public displays, identifying dimensions of interest to application designers. Second, we present a user study with text entry and mental arithmetic tasks to assess how secondary tasks influence performance in the primary task depending on two main dimensions – difficulty and temporal integration. We report performance (completion times, error rates) and subjective user ratings, such as distraction and frustration. Analysis of gaze data suggests three main strategies of how users switch between primary and secondary tasks. Based on our findings, we conclude with recommendations for designing apps with secondary tasks on public displays.
@InProceedings{alt2016perdis1,
author = {Alt, Florian and Torma, Sarah and Buschek, Daniel},
booktitle = {{Proceedings of the 2016 ACM International Symposium on Pervasive Displays}},
title = {{Don't Disturb Me: Understanding Secondary Tasks on Public Displays}},
year = {2016},
address = {New York, NY, USA},
note = {alt2016perdis1},
pages = {1--12},
publisher = {Association for Computing Machinery},
series = {PerDis '16},
abstract = {A growing number of displays provide information and applications in public spaces. Most applications today are considered to pose one task to the user, such as navigating a map. In contrast to such primary tasks, secondary tasks have so far received little attention in research, despite their practical relevance. For example, a secondary task might occur by displaying special ticket offers to a tourist browsing a city map for attractions. This paper investigates secondary tasks with two key contributions: First, we describe a design space for secondary tasks on public displays, identifying dimensions of interest to application designers. Second, we present a user study with text entry and mental arithmetic tasks to assess how secondary tasks influence performance in the primary task depending on two main dimensions -- difficulty and temporal integration. We report performance (completion times, error rates) and subjective user ratings, such as distraction and frustration. Analysis of gaze data suggests three main strategies of how users switch between primary and secondary tasks. Based on our findings, we conclude with recommendations for designing apps with secondary tasks on public displays.},
acmid = {2915023},
doi = {10.1145/2914920.2915023},
isbn = {978-1-4503-4366-4},
keywords = {mental workload, parallel-task environment, public display, secondary task performance},
location = {Oulu, Finland},
numpages = {12},
timestamp = {2016.06.20},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2016perdis1.pdf},
}
F. Alt, A. Bulling, L. Mecke, and D. Buschek. Attention, Please!: Comparing Features for Measuring Audience Attention Towards Pervasive Displays. In Proceedings of the 2016 ACM Conference on Designing Interactive Systems (DIS ’16), Association for Computing Machinery, New York, NY, USA, 2016, p. 823–828. doi:10.1145/2901790.2901897
[BibTeX] [Abstract] [PDF]
Measuring audience attention towards pervasive displays is important but accurate measurement in real time remains a significant sensing challenge. Consequently, researchers and practitioners typically use other features, such as face presence, as a proxy. We provide a principled comparison of the performance of six features and their combinations for measuring attention: face presence, movement trajectory, walking speed, shoulder orientation, head pose, and gaze direction. We implemented a prototype that is capable of capturing this rich set of features from video and depth camera data. Using a controlled lab experiment (N=18) we show that as a single feature, face presence is indeed among the most accurate. We further show that accuracy can be increased through a combination of features (+10.3%), knowledge about the audience (+63.8%), as well as user identities (+69.0%). Our findings are valuable for display providers who want to collect data on display effectiveness or build interactive, responsive apps.
@InProceedings{alt2016dis,
author = {Alt, Florian and Bulling, Andreas and Mecke, Lukas and Buschek, Daniel},
booktitle = {{Proceedings of the 2016 ACM Conference on Designing Interactive Systems}},
title = {{Attention, Please!: Comparing Features for Measuring Audience Attention Towards Pervasive Displays}},
year = {2016},
address = {New York, NY, USA},
note = {alt2016dis},
pages = {823--828},
publisher = {Association for Computing Machinery},
series = {DIS '16},
abstract = {Measuring audience attention towards pervasive displays is important but accurate measurement in real time remains a significant sensing challenge. Consequently, researchers and practitioners typically use other features, such as face presence, as a proxy. We provide a principled comparison of the performance of six features and their combinations for measuring attention: face presence, movement trajectory, walking speed, shoulder orientation, head pose, and gaze direction. We implemented a prototype that is capable of capturing this rich set of features from video and depth camera data. Using a controlled lab experiment (N=18) we show that as a single feature, face presence is indeed among the most accurate. We further show that accuracy can be increased through a combination of features (+10.3%), knowledge about the audience (+63.8%), as well as user identities (+69.0%). Our findings are valuable for display providers who want to collect data on display effectiveness or build interactive, responsive apps.},
acmid = {2901897},
doi = {10.1145/2901790.2901897},
isbn = {978-1-4503-4031-1},
keywords = {audience funnel, interaction, phases, public displays, zones},
location = {Brisbane, QLD, Australia},
numpages = {6},
timestamp = {2016.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2016dis.pdf},
}
R. Haeuslschmid, B. Pfleging, and F. Alt. A Design Space to Support the Development of Windshield Applications for the Car. In Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems (CHI ’16), Association for Computing Machinery, New York, NY, USA, 2016, p. 5076–5091. doi:10.1145/2858036.2858336
[BibTeX] [Abstract] [PDF]
In this paper we present a design space for interactive windshield displays in vehicles and discuss how this design space can support designers in creating windshield applications for drivers, passengers, and pedestrians. Our work is motivated by numerous examples in other HCI-related areas where seminal design space papers served as a valuable basis to evolve the respective field – most notably mobile devices, automotive user interfaces, and interactive public displays. The presented design space is based on a comprehensive literature review. Furthermore we present a classification of 211 windshield applications, derived from a survey of research projects and commercial products as well as from focus groups. We showcase the utility of our work for designers of windshield applications through two scenarios. Overall, our design space can help building applications for diverse use cases. This includes apps inside and outside the car as well as applications for specific areas (fire fighters, police, ambulance).
@InProceedings{haeuslschmid2016chi,
author = {Haeuslschmid, Renate and Pfleging, Bastian and Alt, Florian},
booktitle = {{Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems}},
title = {{A Design Space to Support the Development of Windshield Applications for the Car}},
year = {2016},
address = {New York, NY, USA},
note = {haeuslschmid2016chi},
pages = {5076--5091},
publisher = {Association for Computing Machinery},
series = {CHI '16},
abstract = {In this paper we present a design space for interactive windshield displays in vehicles and discuss how this design space can support designers in creating windshield applications for drivers, passengers, and pedestrians. Our work is motivated by numerous examples in other HCI-related areas where seminal design space papers served as a valuable basis to evolve the respective field -- most notably mobile devices, automotive user interfaces, and interactive public displays. The presented design space is based on a comprehensive literature review. Furthermore we present a classification of 211 windshield applications, derived from a survey of research projects and commercial products as well as from focus groups. We showcase the utility of our work for designers of windshield applications through two scenarios. Overall, our design space can help building applications for diverse use cases. This includes apps inside and outside the car as well as applications for specific areas (fire fighters, police, ambulance).},
acmid = {2858336},
doi = {10.1145/2858036.2858336},
isbn = {978-1-4503-3362-7},
keywords = {automotive interfaces, design space, head-up display, in-vehicle interfaces, windshield display},
location = {San Jose, California, USA},
numpages = {16},
timestamp = {2016.05.13},
url = {http://www.florian-alt.org/unibw/wp-content/publications/haeuslschmid2016chi.pdf},
}
D. Buschek, A. De Luca, and F. Alt. Evaluating the Influence of Targets and Hand Postures on Touch-based Behavioural Biometrics. In Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems (CHI ’16), Association for Computing Machinery, New York, NY, USA, 2016, p. 1349–1361. doi:10.1145/2858036.2858165
[BibTeX] [Abstract] [PDF]
Users’ individual differences in their mobile touch behaviour can help to continuously verify identity and protect personal data. However, little is known about the influence of GUI elements and hand postures on such touch biometrics. Thus, we present a metric to measure the amount of user-revealing information that can be extracted from touch targeting interactions and apply it in eight targeting tasks with over 150,000 touches from 24 users in two sessions. We compare touch-to-target offset patterns for four target types and two hand postures. Our analyses reveal that small, compactly shaped targets near screen edges yield the most descriptive touch targeting patterns. Moreover, our results show that thumb touches are more individual than index finger ones. We conclude that touch-based user identification systems should analyse GUI layouts and infer hand postures. We also describe a framework to estimate the usefulness of GUIs for touch biometrics.
@InProceedings{buschek2016chi2,
author = {Buschek, Daniel and De Luca, Alexander and Alt, Florian},
booktitle = {{Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems}},
title = {{Evaluating the Influence of Targets and Hand Postures on Touch-based Behavioural Biometrics}},
year = {2016},
address = {New York, NY, USA},
note = {buschek2016chi2},
pages = {1349--1361},
publisher = {Association for Computing Machinery},
series = {CHI '16},
abstract = {Users' individual differences in their mobile touch behaviour can help to continuously verify identity and protect personal data. However, little is known about the influence of GUI elements and hand postures on such touch biometrics. Thus, we present a metric to measure the amount of user-revealing information that can be extracted from touch targeting interactions and apply it in eight targeting tasks with over 150,000 touches from 24 users in two sessions. We compare touch-to-target offset patterns for four target types and two hand postures. Our analyses reveal that small, compactly shaped targets near screen edges yield the most descriptive touch targeting patterns. Moreover, our results show that thumb touches are more individual than index finger ones. We conclude that touch-based user identification systems should analyse GUI layouts and infer hand postures. We also describe a framework to estimate the usefulness of GUIs for touch biometrics.},
acmid = {2858165},
doi = {10.1145/2858036.2858165},
isbn = {978-1-4503-3362-7},
keywords = {behavioural biometrics, mobile device, touch targeting},
location = {San Jose, California, USA},
numpages = {13},
timestamp = {2016.05.13},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2016chi2.pdf},
}
D. Buschek, F. Hartmann, E. von Zezschwitz, A. De Luca, and F. Alt. SnapApp: Reducing Authentication Overhead with a Time-Constrained Fast Unlock Option. In Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems (CHI ’16), Association for Computing Machinery, New York, NY, USA, 2016, p. 3736–3747. doi:10.1145/2858036.2858164
[BibTeX] [Abstract] [PDF]
We present SnapApp, a novel unlock concept for mobile devices that reduces authentication overhead with a time-constrained quick-access option. SnapApp provides two unlock methods at once: While PIN entry enables full access to the device, users can also bypass authentication with a short sliding gesture (“Snap”). This grants access for a limited amount of time (e.g. 30 seconds). The device then automatically locks itself upon expiration. Our concept further explores limiting the possible number of Snaps in a row, and configuring blacklists for app use during short access (e.g. to exclude banking apps). We discuss opportunities and challenges of this concept based on a 30-day field study with 18 participants, including data logging and experience sampling methods. Snaps significantly reduced unlock times, and our app was perceived to offer a good tradeoff. Conceptual challenges include, for example, supporting users in configuring their blacklists.
@InProceedings{buschek2016chi1,
author = {Buschek, Daniel and Hartmann, Fabian and von Zezschwitz, Emanuel and De Luca, Alexander and Alt, Florian},
booktitle = {{Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems}},
title = {{SnapApp: Reducing Authentication Overhead with a Time-Constrained Fast Unlock Option}},
year = {2016},
address = {New York, NY, USA},
note = {buschek2016chi1},
pages = {3736--3747},
publisher = {Association for Computing Machinery},
series = {CHI '16},
abstract = {We present SnapApp, a novel unlock concept for mobile devices that reduces authentication overhead with a time-constrained quick-access option. SnapApp provides two unlock methods at once: While PIN entry enables full access to the device, users can also bypass authentication with a short sliding gesture ("Snap"). This grants access for a limited amount of time (e.g. 30 seconds). The device then automatically locks itself upon expiration. Our concept further explores limiting the possible number of Snaps in a row, and configuring blacklists for app use during short access (e.g. to exclude banking apps). We discuss opportunities and challenges of this concept based on a 30-day field study with 18 participants, including data logging and experience sampling methods. Snaps significantly reduced unlock times, and our app was perceived to offer a good tradeoff. Conceptual challenges include, for example, supporting users in configuring their blacklists.},
acmid = {2858164},
doi = {10.1145/2858036.2858164},
isbn = {978-1-4503-3362-7},
keywords = {smartphone authentication, time-constrained device access, usable privacy and security},
location = {San Jose, California, USA},
numpages = {12},
timestamp = {2016.05.13},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2016chi1.pdf},
}
H. Schneider, K. Moser, A. Butz, and F. Alt. Understanding the Mechanics of Persuasive System Design: A Mixed-Method Theory-driven Analysis of Freeletics. In Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems (CHI ’16), Association for Computing Machinery, New York, NY, USA, 2016, p. 309–320. doi:10.1145/2858036.2858290
[BibTeX] [Abstract] [PDF]
While we know that persuasive system design matters, we barely understand when persuasive strategies work and why they only work in some cases. We propose an approach to systematically understand and design for motivation, by studying the fundamental building blocks of motivation, according to the theory of planned behavior (TPB): attitude, subjective norm, and perceived control. We quantitatively analyzed (N=643) the attitudes, beliefs, and values of mobile fitness coach users with TPB. Capacity (i.e., perceived ability to exercise) had the biggest effect on users’ motivation. Using individual differences theory, we identified three distinct user groups, namely followers, hedonists, and achievers. With insights from semi-structured interviews (N=5), we derive design implications, finding that transformation videos featuring other users’ success stories, as well as suggestions for an appropriate workout, can have positive effects on perceived capacity. Practitioners and researchers can use our theory-based mixed-method research design to better understand user behavior in persuasive applications.
@InProceedings{schneider2016chi,
author = {Schneider, Hanna and Moser, Kilian and Butz, Andreas and Alt, Florian},
booktitle = {{Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems}},
title = {{Understanding the Mechanics of Persuasive System Design: A Mixed-Method Theory-driven Analysis of Freeletics}},
year = {2016},
address = {New York, NY, USA},
note = {schneider2016chi},
pages = {309--320},
publisher = {Association for Computing Machinery},
series = {CHI '16},
abstract = {While we know that persuasive system design matters, we barely understand when persuasive strategies work and why they only work in some cases. We propose an approach to systematically understand and design for motivation, by studying the fundamental building blocks of motivation, according to the theory of planned behavior (TPB): attitude, subjective norm, and perceived control. We quantitatively analyzed (N=643) the attitudes, beliefs, and values of mobile fitness coach users with TPB. Capacity (i.e., perceived ability to exercise) had the biggest effect on users' motivation. Using individual differences theory, we identified three distinct user groups, namely followers, hedonists, and achievers. With insights from semi-structured interviews (N=5), we derive design implications, finding that transformation videos featuring other users' success stories, as well as suggestions for an appropriate workout, can have positive effects on perceived capacity. Practitioners and researchers can use our theory-based mixed-method research design to better understand user behavior in persuasive applications.},
acmid = {2858290},
doi = {10.1145/2858036.2858290},
isbn = {978-1-4503-3362-7},
keywords = {behavior change, fitness application, personal values, persuasive technology, theory of planned behavior},
location = {San Jose, California, USA},
numpages = {12},
timestamp = {2016.05.12},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schneider2016chi.pdf},
}
M. Hassib, M. Khamis, S. Schneegass, A. S. Shirazi, and F. Alt. Investigating User Needs for Bio-sensing and Affective Wearables. In Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems (CHI EA ’16), Association for Computing Machinery, New York, NY, USA, 2016, p. 1415–1422. doi:10.1145/2851581.2892480
[BibTeX] [Abstract] [PDF]
Bio-sensing wearables are currently advancing to provide users with a wealth of information about their physiological and affective states. However, relatively little is known about users’ interest in acquiring, sharing and receiving this information and through which channels and modalities. To close this gap, we report on the results of an online survey (N=109) exploring principal aspects of the design space of wearables such as data types, contexts, feedback modalities and sharing behaviors. Results show that users are interested in obtaining physiological, emotional and cognitive data through modalities beyond traditional touchscreen output. The valence of the information, whether positive or negative, affects the sharing behaviors.
@InProceedings{hassib2016chiea,
author = {Hassib, Mariam and Khamis, Mohamed and Schneegass, Stefan and Shirazi, Ali Sahami and Alt, Florian},
booktitle = {{Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems}},
title = {{Investigating User Needs for Bio-sensing and Affective Wearables}},
year = {2016},
address = {New York, NY, USA},
note = {hassib2016chiea},
pages = {1415--1422},
publisher = {Association for Computing Machinery},
series = {CHI EA '16},
abstract = {Bio-sensing wearables are currently advancing to provide users with a wealth of information about their physiological and affective states. However, relatively little is known about users' interest in acquiring, sharing and receiving this information and through which channels and modalities. To close this gap, we report on the results of an online survey (N=109) exploring principal aspects of the design space of wearables such as data types, contexts, feedback modalities and sharing behaviors. Results show that users are interested in obtaining physiological, emotional and cognitive data through modalities beyond traditional touchscreen output. The valence of the information, whether positive or negative, affects the sharing behaviors.},
acmid = {2892480},
doi = {10.1145/2851581.2892480},
isbn = {978-1-4503-4082-3},
keywords = {cognition, emotion, physiological sensing, wearables},
location = {San Jose, California, USA},
numpages = {8},
timestamp = {2016.05.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/hassib2016chiea.pdf},
}
J. Shi, D. Buschek, and F. Alt. Investigating the Impact of Feedback on Gaming Performance on Motivation to Interact with Public Displays. In Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems (CHI EA ’16), Association for Computing Machinery, New York, NY, USA, 2016, p. 1344–1351. doi:10.1145/2851581.2892465
[BibTeX] [Abstract] [PDF]
This paper investigates the influence of feedback about users’ performance on their motivation as they interact with games on displays in public space. Our research is motivated by the fact that games are popular among both researchers and practitioners, due to their ability to attract many users. However, it is widely unclear which factors impact how much people play and whether they leave personal information on the display. We investigate different forms of feedback (highscore, real-time score and real-time rank during gameplay) and report on how they influence the behavior of users. Our results are based on data from the deployment of an interactive game in a public space.
@InProceedings{shi2016chiea,
author = {Shi, Jiamin and Buschek, Daniel and Alt, Florian},
booktitle = {{Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems}},
title = {{Investigating the Impact of Feedback on Gaming Performance on Motivation to Interact with Public Displays}},
year = {2016},
address = {New York, NY, USA},
note = {shi2016chiea},
pages = {1344--1351},
publisher = {Association for Computing Machinery},
series = {CHI EA '16},
abstract = {This paper investigates the influence of feedback about users' performance on their motivation as they interact with games on displays in public space. Our research is motivated by the fact that games are popular among both researchers and practitioners, due to their ability to attract many users. However, it is widely unclear which factors impact how much people play and whether they leave personal information on the display. We investigate different forms of feedback (highscore, real-time score and real-time rank during gameplay) and report on how they influence the behavior of users. Our results are based on data from the deployment of an interactive game in a public space.},
acmid = {2892465},
doi = {10.1145/2851581.2892465},
isbn = {978-1-4503-4082-3},
keywords = {competition, motivation, public displays, user performance},
location = {San Jose, California, USA},
numpages = {8},
timestamp = {2016.05.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/shi2016chiea.pdf},
}
M. Khamis, F. Alt, M. Hassib, E. von Zezschwitz, R. Hasholzner, and A. Bulling. GazeTouchPass: Multimodal Authentication Using Gaze and Touch on Mobile Devices. In Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems (CHI EA ’16), Association for Computing Machinery, New York, NY, USA, 2016, p. 2156–2164. doi:10.1145/2851581.2892314
[BibTeX] [Abstract] [PDF]
We propose a multimodal scheme, GazeTouchPass, that combines gaze and touch for shoulder-surfing resistant user authentication on mobile devices. GazeTouchPass allows passwords with multiple switches between input modalities during authentication. This requires attackers to simultaneously observe the device screen and the user’s eyes to find the password. We evaluate the security and usability of GazeTouchPass in two user studies. Our findings show that GazeTouchPass is usable and significantly more secure than single-modal authentication against basic and even advanced shoulder-surfing attacks.
@InProceedings{khamis2016chiea,
author = {Khamis, Mohamed and Alt, Florian and Hassib, Mariam and von Zezschwitz, Emanuel and Hasholzner, Regina and Bulling, Andreas},
booktitle = {{Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems}},
title = {{GazeTouchPass: Multimodal Authentication Using Gaze and Touch on Mobile Devices}},
year = {2016},
address = {New York, NY, USA},
note = {khamis2016chiea},
pages = {2156--2164},
publisher = {Association for Computing Machinery},
series = {CHI EA '16},
abstract = {We propose a multimodal scheme, GazeTouchPass, that combines gaze and touch for shoulder-surfing resistant user authentication on mobile devices. GazeTouchPass allows passwords with multiple switches between input modalities during authentication. This requires attackers to simultaneously observe the device screen and the user's eyes to find the password. We evaluate the security and usability of GazeTouchPass in two user studies. Our findings show that GazeTouchPass is usable and significantly more secure than single-modal authentication against basic and even advanced shoulder-surfing attacks.},
acmid = {2892314},
doi = {10.1145/2851581.2892314},
isbn = {978-1-4503-4082-3},
keywords = {gaze gestures, mobile devices, multimodal authentication},
location = {San Jose, California, USA},
numpages = {9},
timestamp = {2016.05.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2016chiea.pdf},
}
J. Shi and F. Alt. The Anonymous Audience Analyzer: Visualizing Audience Behavior in Public Space. In Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems (CHI EA ’16), Association for Computing Machinery, New York, NY, USA, 2016, p. 3766–3769. doi:10.1145/2851581.2890256
[BibTeX] [Abstract] [PDF]
With dropping hardware prices, an increasing number of interactive displays is being deployed in public space. To investigate and understand the impact of novel interaction techniques, content, and display properties, researchers and practitioners alike rely on observations of the audience. While in-situ observations are costly in terms of time and effort, video data allows situations in front of the display to be analyzed post-hoc. In many situations, however, video recordings are not possible since the privacy of users needs to be protected. To address this challenge, we present a tool that allows scenes in front of a display to be reconstructed from Kinect data (user position and body posture) and visualized in a virtual environment. In this way, the privacy of the audience can be preserved while allowing display owners to run in-depth investigations of their display installations.
@InProceedings{shi2016chidemo,
author = {Shi, Jiamin and Alt, Florian},
booktitle = {{Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems}},
title = {{The Anonymous Audience Analyzer: Visualizing Audience Behavior in Public Space}},
year = {2016},
address = {New York, NY, USA},
note = {shi2016chidemo},
pages = {3766--3769},
publisher = {Association for Computing Machinery},
series = {CHI EA '16},
abstract = {With dropping hardware prices, an increasing number of interactive displays is being deployed in public space. To investigate and understand the impact of novel interaction techniques, content, and display properties, researchers and practitioners alike rely on observations of the audience. While in-situ observations are costly in terms of time and effort, video data allows situations in front of the display to be analyzed post-hoc. In many situations, however, video recordings are not possible since the privacy of users needs to be protected. To address this challenge, we present a tool that allows scenes in front of a display to be reconstructed from Kinect data (user position and body posture) and visualized in a virtual environment. In this way, the privacy of the audience can be preserved while allowing display owners to run in-depth investigations of their display installations.},
acmid = {2890256},
doi = {10.1145/2851581.2890256},
isbn = {978-1-4503-4082-3},
keywords = {audience behaviour, public displays, virtual reality},
location = {San Jose, California, USA},
numpages = {4},
timestamp = {2016.04.23},
url = {http://www.florian-alt.org/unibw/wp-content/publications/shi2016chidemo.pdf},
}
M. Khamis, F. Alt, and A. Bulling. Challenges and Design Space of Gaze-Enabled Public Displays. In Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing: Adjunct (UbiComp ’16), Association for Computing Machinery, New York, NY, USA, 2016, p. 1736–1745. doi:10.1145/2968219.2968342
[BibTeX] [Abstract] [PDF]
Gaze is an attractive modality for public displays; hence, recent years have seen an increase in deployments of gaze-enabled public displays. Although gaze has been thoroughly investigated for desktop scenarios, gaze-enabled public displays present new challenges that are unique to this setup. In contrast to desktop settings, public displays (1) cannot afford requiring eye tracker calibration, (2) expect users to interact from different positions, and (3) expect multiple users to interact simultaneously. In this work we discuss these challenges and explore the design space of gaze-enabled public displays. We conclude by discussing how the current state of research stands with respect to the identified challenges, and highlight directions for future work.
@InProceedings{khamis2016ubicompadj,
author = {Khamis, Mohamed and Alt, Florian and Bulling, Andreas},
booktitle = {{Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing: Adjunct}},
title = {{Challenges and Design Space of Gaze-Enabled Public Displays}},
year = {2016},
address = {New York, NY, USA},
note = {khamis2016ubicompadj},
pages = {1736--1745},
publisher = {Association for Computing Machinery},
series = {UbiComp '16},
abstract = {Gaze is an attractive modality for public displays; hence, recent years have seen an increase in deployments of gaze-enabled public displays. Although gaze has been thoroughly investigated for desktop scenarios, gaze-enabled public displays present new challenges that are unique to this setup. In contrast to desktop settings, public displays (1) cannot afford requiring eye tracker calibration, (2) expect users to interact from different positions, and (3) expect multiple users to interact simultaneously. In this work we discuss these challenges and explore the design space of gaze-enabled public displays. We conclude by discussing how the current state of research stands with respect to the identified challenges, and highlight directions for future work.},
doi = {10.1145/2968219.2968342},
isbn = {978-1-4503-4462-3},
keywords = {gaze interaction, gaze-enabled displays, public displays},
location = {Heidelberg, Germany},
numpages = {10},
url = {https://doi.org/10.1145/2968219.2968342},
}

2015

F. Alt, A. Bulling, G. Gravanis, and D. Buschek. GravitySpot: Guiding Users in Front of Public Displays Using On-Screen Visual Cues. In Proceedings of the 28th Annual ACM Symposium on User Interface Software & Technology (UIST ’15), Association for Computing Machinery, New York, NY, USA, 2015, p. 47–56. doi:10.1145/2807442.2807490
[BibTeX] [Abstract] [PDF]
Users tend to position themselves in front of interactive public displays in such a way as to best perceive their content. Currently, this sweet spot is implicitly defined by display properties, content, the input modality, as well as space constraints in front of the display. We present GravitySpot – an approach that makes sweet spots flexible by actively guiding users to arbitrary target positions in front of displays using visual cues. Such guidance is beneficial, for example, if a particular input technology only works at a specific distance or if users should be guided towards a non-crowded area of a large display. In two controlled lab studies (n=29) we evaluate different visual cues based on color, shape, and motion, as well as position-to-cue mapping functions. We show that both the visual cues and mapping functions allow for fine-grained control over positioning speed and accuracy. Findings are complemented by observations from a 3-month real-world deployment.
@InProceedings{alt2015uist,
author = {Alt, Florian and Bulling, Andreas and Gravanis, Gino and Buschek, Daniel},
booktitle = {{Proceedings of the 28th Annual ACM Symposium on User Interface Software \& Technology}},
title = {{GravitySpot: Guiding Users in Front of Public Displays Using On-Screen Visual Cues}},
year = {2015},
address = {New York, NY, USA},
note = {alt2015uist},
pages = {47--56},
publisher = {Association for Computing Machinery},
series = {UIST '15},
abstract = {Users tend to position themselves in front of interactive public displays in such a way as to best perceive their content. Currently, this sweet spot is implicitly defined by display properties, content, the input modality, as well as space constraints in front of the display. We present GravitySpot - an approach that makes sweet spots flexible by actively guiding users to arbitrary target positions in front of displays using visual cues. Such guidance is beneficial, for example, if a particular input technology only works at a specific distance or if users should be guided towards a non-crowded area of a large display. In two controlled lab studies (n=29) we evaluate different visual cues based on color, shape, and motion, as well as position-to-cue mapping functions. We show that both the visual cues and mapping functions allow for fine-grained control over positioning speed and accuracy. Findings are complemented by observations from a 3-month real-world deployment.},
acmid = {2807490},
doi = {10.1145/2807442.2807490},
isbn = {978-1-4503-3779-3},
keywords = {audience behavior, interaction, public displays, sweet spot},
location = {Charlotte, NC, USA},
numpages = {10},
timestamp = {2015.11.18},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2015uist.pdf},
}
P. Panhey, T. Döring, S. Schneegass, D. Wenig, and F. Alt. What People Really Remember: Understanding Cognitive Effects When Interacting with Large Displays. In Proceedings of the 2015 International Conference on Interactive Tabletops & Surfaces (ITS ’15), Association for Computing Machinery, New York, NY, USA, 2015, p. 103–106. doi:10.1145/2817721.2817732
[BibTeX] [Abstract] [PDF]
This paper investigates how common interaction techniques for large displays impact recall in learning tasks. Our work is motivated by results of prior research in different areas that report a positive effect of interactivity on cognition. We present findings from a controlled lab experiment with 32 participants comparing mobile phone-based interaction, touch interaction and full-body interaction to a non-interactive baseline. In contrast to prior findings, our results reveal that more movement can negatively influence recall. In particular we show that designers face an inherent trade-off between designing engaging interaction through extensive movement and creating memorable content.
@InProceedings{panhey2015its,
author = {Panhey, Philipp and D\"{o}ring, Tanja and Schneegass, Stefan and Wenig, Dirk and Alt, Florian},
booktitle = {{Proceedings of the 2015 International Conference on Interactive Tabletops \& Surfaces}},
title = {{What People Really Remember: Understanding Cognitive Effects When Interacting with Large Displays}},
year = {2015},
address = {New York, NY, USA},
note = {panhey2015its},
pages = {103--106},
publisher = {Association for Computing Machinery},
series = {ITS '15},
abstract = {This paper investigates how common interaction techniques for large displays impact recall in learning tasks. Our work is motivated by results of prior research in different areas that report a positive effect of interactivity on cognition. We present findings from a controlled lab experiment with 32 participants comparing mobile phone-based interaction, touch interaction and full-body interaction to a non-interactive baseline. In contrast to prior findings, our results reveal that more movement can negatively influence recall. In particular we show that designers face an inherent trade-off between designing engaging interaction through extensive movement and creating memorable content.},
acmid = {2817732},
doi = {10.1145/2817721.2817732},
isbn = {978-1-4503-3899-8},
keywords = {cognition, interactivity, pervasive displays, recall},
location = {Madeira, Portugal},
numpages = {4},
timestamp = {2015.11.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/panhey2015its.pdf},
}
A. Fedosov, E. Niforatos, F. Alt, and I. Elhart. Supporting Interactivity on a Ski Lift. In Adjunct Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing and Proceedings of the 2015 Association for Computing Machinery International Symposium on Wearable Computers (UbiComp/ISWC’15 Adjunct), Association for Computing Machinery, New York, NY, USA, 2015, p. 767–770. doi:10.1145/2800835.2807952
[BibTeX] [PDF]
@InProceedings{fedosov2015ubicompadj,
author = {Fedosov, Anton and Niforatos, Evangelos and Alt, Florian and Elhart, Ivan},
booktitle = {{Adjunct Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing and Proceedings of the 2015 Association for Computing Machinery International Symposium on Wearable Computers}},
title = {{Supporting Interactivity on a Ski Lift}},
year = {2015},
address = {New York, NY, USA},
note = {fedosov2015ubicompadj},
pages = {767--770},
publisher = {Association for Computing Machinery},
series = {UbiComp/ISWC'15 Adjunct},
doi = {10.1145/2800835.2807952},
isbn = {978-1-4503-3575-1},
keywords = {interaction, outdoor sports, public displays, skiing},
location = {Osaka, Japan},
numpages = {4},
timestamp = {2015.09.15},
url = {http://florian-alt.org/unibw/wp-content/publications/fedosov2015ubicompadj.pdf},
}
N. Broy, M. Nefzger, F. Alt, M. Hassib, and A. Schmidt. 3D-HUDD – Developing a Prototyping Tool for 3D Head-Up Displays. In Proceedings of the 15th IFIP TC13 International Conference on Human-Computer Interaction (INTERACT ’15), Association for Computing Machinery, New York, NY, USA, 2015.
[BibTeX] [Abstract] [PDF]
The ability of head-up displays (HUDs) to present information within the usual viewpoint of the user has led to a quick adoption in domains where attention is crucial, such as in the car. As HUDs employ 3D technology, further opportunities emerge: information can be structured and positioned in 3D space, thus allowing important information to be perceived more easily, and information can be registered with objects in the visual scene to communicate a relationship. This allows novel user interfaces to be built. As of today, however, no prototyping tools exist that allow 3D UIs for HUDs to be sketched and tested prior to development. To close this gap, we report on the design and development of the 3D Head-Up Display Designer (3D-HUDD). In addition, we present an evaluation of the tool with 24 participants, comparing different input modalities and depth management modes.
@InProceedings{broy2015interact,
author = {Nora Broy AND Matthias Nefzger AND Florian Alt AND Mariam Hassib AND Albrecht Schmidt},
booktitle = {{Proceedings of the 15th IFIP TC13 International Conference on Human-Computer Interaction}},
title = {{3D-HUDD -- Developing a Prototyping Tool for 3D Head-Up Displays}},
year = {2015},
address = {New York, NY, USA},
note = {broy2015interact},
publisher = {Association for Computing Machinery},
series = {INTERACT '15},
abstract = {The ability of head-up displays (HUDs) to present information within the usual viewpoint of the user has led to a quick adoption in domains where attention is crucial, such as in the car. As HUDs employ 3D technology, further opportunities emerge: information can be structured and positioned in 3D space, thus allowing important information to be perceived more easily, and information can be registered with objects in the visual scene to communicate a relationship. This allows novel user interfaces to be built. As of today, however, no prototyping tools exist that allow 3D UIs for HUDs to be sketched and tested prior to development. To close this gap, we report on the design and development of the 3D Head-Up Display Designer (3D-HUDD). In addition, we present an evaluation of the tool with 24 participants, comparing different input modalities and depth management modes.},
location = {Bamberg, Germany},
numpages = {6},
owner = {florianalt},
timestamp = {2015.09.14},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2015interact.pdf},
}
N. Broy, M. Guo, S. Schneegass, B. Pfleging, and F. Alt. Introducing Novel Technologies in the Car: Conducting a Real-world Study to Test 3D Dashboards. In Proceedings of the 7th International Conference on Automotive User Interfaces and Interactive Vehicular Applications (AutomotiveUI ’15), Association for Computing Machinery, New York, NY, USA, 2015, p. 179–186. doi:10.1145/2799250.2799280
[BibTeX] [Abstract] [PDF]
Today, the vast majority of research on novel automotive user interface technologies is conducted in the lab, often using driving simulation. While such studies are important in early stages of the design process, we argue that ultimately studies need to be conducted in the real world in order to investigate all aspects crucial for adoption of novel user interface technologies in commercial vehicles. In this paper, we present a case study that investigates introducing autostereoscopic 3D dashboards into cars. We report on studying this novel technology in the real world, validating and extending findings of prior simulator studies. Furthermore, we provide guidelines for practitioners and researchers to design and conduct real-world studies that minimize the risk for participants while at the same time yielding ecologically valid findings.
@InProceedings{broy2015autoui,
author = {Broy, Nora and Guo, Mengbing and Schneegass, Stefan and Pfleging, Bastian and Alt, Florian},
booktitle = {{Proceedings of the 7th International Conference on Automotive User Interfaces and Interactive Vehicular Applications}},
title = {{Introducing Novel Technologies in the Car: Conducting a Real-world Study to Test 3D Dashboards}},
year = {2015},
address = {New York, NY, USA},
note = {broy2015autoui},
pages = {179--186},
publisher = {Association for Computing Machinery},
series = {AutomotiveUI '15},
abstract = {Today, the vast majority of research on novel automotive user interface technologies is conducted in the lab, often using driving simulation. While such studies are important in early stages of the design process, we argue that ultimately studies need to be conducted in the real world in order to investigate all aspects crucial for adoption of novel user interface technologies in commercial vehicles. In this paper, we present a case study that investigates introducing autostereoscopic 3D dashboards into cars. We report on studying this novel technology in the real world, validating and extending findings of prior simulator studies. Furthermore, we provide guidelines for practitioners and researchers to design and conduct real-world studies that minimize the risk for participants while at the same time yielding ecologically valid findings.},
acmid = {2799280},
doi = {10.1145/2799250.2799280},
isbn = {978-1-4503-3736-6},
keywords = {automotive UIs, real world study, stereoscopic 3D},
location = {Nottingham, United Kingdom},
numpages = {8},
timestamp = {2015.09.14},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2015autoui.pdf},
}
M. Khamis, A. Bulling, and F. Alt. Tackling Challenges of Interactive Public Displays Using Gaze. In Adjunct Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing and Proceedings of the 2015 Association for Computing Machinery International Symposium on Wearable Computers (UbiComp/ISWC’15 Adjunct), Association for Computing Machinery, New York, NY, USA, 2015, p. 763–766. doi:10.1145/2800835.2807951
[BibTeX] [Abstract] [PDF]
Falling hardware prices led to a widespread use of public displays. Common interaction techniques for such displays currently include touch, mid-air, or smartphone-based interaction. While these techniques are well understood from a technical perspective, several remaining challenges hinder the uptake of interactive displays among passersby. In this paper we propose addressing major public display challenges through gaze as a novel interaction modality. We discuss why gaze-based interaction can tackle these challenges effectively and discuss how solutions can be technically realized. Furthermore, we summarize state-of-the-art eye tracking techniques that show particular promise in the area of public displays.
@InProceedings{khamis2015pdapps,
author = {Khamis, Mohamed and Bulling, Andreas and Alt, Florian},
booktitle = {{Adjunct Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing and Proceedings of the 2015 Association for Computing Machinery International Symposium on Wearable Computers}},
title = {{Tackling Challenges of Interactive Public Displays Using Gaze}},
year = {2015},
address = {New York, NY, USA},
note = {khamis2015pdapps},
pages = {763--766},
publisher = {Association for Computing Machinery},
series = {UbiComp/ISWC'15 Adjunct},
abstract = {Falling hardware prices led to a widespread use of public displays. Common interaction techniques for such displays currently include touch, mid-air, or smartphone-based interaction. While these techniques are well understood from a technical perspective, several remaining challenges hinder the uptake of interactive displays among passersby. In this paper we propose addressing major public display challenges through gaze as a novel interaction modality. We discuss why gaze-based interaction can tackle these challenges effectively and discuss how solutions can be technically realized. Furthermore, we summarize state-of-the-art eye tracking techniques that show particular promise in the area of public displays.},
acmid = {2807951},
doi = {10.1145/2800835.2807951},
isbn = {978-1-4503-3575-1},
keywords = {digital signage, gaze, gaze-based interaction, pervasive displays, public displays},
location = {Osaka, Japan},
numpages = {4},
timestamp = {2015.09.08},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2015pdapps.pdf},
}
M. Khamis, F. Alt, and A. Bulling. A Field Study on Spontaneous Gaze-based Interaction with a Public Display Using Pursuits. In Adjunct Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing and Proceedings of the 2015 Association for Computing Machinery International Symposium on Wearable Computers (UbiComp/ISWC’15 Adjunct), Association for Computing Machinery, New York, NY, USA, 2015, p. 863–872. doi:10.1145/2800835.2804335
[BibTeX] [Abstract] [PDF]
Smooth pursuit eye movements were recently introduced as a promising technique for calibration-free and thus spontaneous and natural gaze interaction. While pursuits have been evaluated in controlled laboratory studies, the technique has not yet been evaluated with respect to usability in the wild. We report on a field study in which we deployed a game on a public display where participants used pursuits to select fish moving in linear and circular trajectories at different speeds. The study ran for two days in a busy computer lab resulting in a total of 56 interactions. Results from our study show that linear trajectories are statistically faster to select via pursuits than circular trajectories. We also found that pursuits is well perceived by users who find it fast and responsive.
@InProceedings{khamis2015petmei,
author = {Khamis, Mohamed and Alt, Florian and Bulling, Andreas},
booktitle = {{Adjunct Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing and Proceedings of the 2015 Association for Computing Machinery International Symposium on Wearable Computers}},
title = {{A Field Study on Spontaneous Gaze-based Interaction with a Public Display Using Pursuits}},
year = {2015},
address = {New York, NY, USA},
note = {khamis2015petmei},
pages = {863--872},
publisher = {Association for Computing Machinery},
series = {UbiComp/ISWC'15 Adjunct},
abstract = {Smooth pursuit eye movements were recently introduced as a promising technique for calibration-free and thus spontaneous and natural gaze interaction. While pursuits have been evaluated in controlled laboratory studies, the technique has not yet been evaluated with respect to usability in the wild. We report on a field study in which we deployed a game on a public display where participants used pursuits to select fish moving in linear and circular trajectories at different speeds. The study ran for two days in a busy computer lab resulting in a total of 56 interactions. Results from our study show that linear trajectories are statistically faster to select via pursuits than circular trajectories. We also found that pursuits is well perceived by users who find it fast and responsive.},
acmid = {2804335},
doi = {10.1145/2800835.2804335},
isbn = {978-1-4503-3575-1},
keywords = {field study, pervasive displays, public displays, pursuits, smooth pursuit eye movement},
location = {Osaka, Japan},
numpages = {10},
timestamp = {2015.09.08},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2015petmei.pdf},
}
D. Buschek, I. Just, B. Fritzsche, and F. Alt. Make Me Laugh: A Recommendation System for Humoristic Content on the World Wide Web. In Proceedings of Mensch und Computer 2015, 2015.
[BibTeX] [Abstract] [PDF]
Humoristic content is an inherent part of the World Wide Web and increasingly consumed for micro-entertainment. However, humor is often highly individual and depends on background knowledge and context. This paper presents an approach to recommend humoristic content fitting each individual user’s taste and interests. In a field study with 150 participants over four weeks, users rated content with a 0-10 scale on a humor website. Based on this data, we train and apply a Collaborative Filtering (CF) algorithm to assess individual humor and recommend fitting content. Our study shows that users rate recommended content 22.6% higher than randomly chosen content.
@InProceedings{buschek2015muc,
author = {Buschek, Daniel and Just, Ingo and Fritzsche, Benjamin and Alt, Florian},
title = {{Make Me Laugh: A Recommendation System for Humoristic Content on the World Wide Web}},
booktitle = {{Proceedings of Mensch und Computer 2015}},
year = {2015},
note = {buschek2015muc},
abstract = {Humoristic content is an inherent part of the World Wide Web and increasingly consumed for micro-entertainment. However, humor is often highly individual and depends on background knowledge and context. This paper presents an approach to recommend humoristic content fitting each individual user's taste and interests. In a field study with 150 participants over four weeks, users rated content with a 0-10 scale on a humor website. Based on this data, we train and apply a Collaborative Filtering (CF) algorithm to assess individual humor and recommend fitting content. Our study shows that users rate recommended content 22.6% higher than randomly chosen content.},
location = {Stuttgart, Germany},
numpages = {10},
owner = {florian},
timestamp = {2015.09.06},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2015muc.pdf},
}
F. Alt, S. Schneegass, A. S. Shirazi, M. Hassib, and A. Bulling. Graphical Passwords in the Wild: Understanding How Users Choose Pictures and Passwords in Image-based Authentication Schemes. In Proceedings of the 17th International Conference on Human-Computer Interaction with Mobile Devices and Services (MobileHCI ’15), Association for Computing Machinery, New York, NY, USA, 2015, p. 316–322. doi:10.1145/2785830.2785882
[BibTeX] [Abstract] [PDF]
Common user authentication methods on smartphones, such as lock patterns, PINs, or passwords, impose a trade-off between security and password memorability. Image-based passwords were proposed as a secure and usable alternative. As of today, however, it remains unclear how such schemes are used in the wild. We present the first study to investigate how image-based passwords are used over long periods of time in the real world. Our analyses are based on data from 2318 unique devices collected over more than one year using a custom application released in the Android Play store. We present an in-depth analysis of what kind of images users select, how they define their passwords, and how secure these passwords are. Our findings provide valuable insights into real-world use of image-based passwords and inform the design of future graphical authentication schemes.
@InProceedings{alt2015mobilehci,
author = {Alt, Florian and Schneegass, Stefan and Shirazi, Alireza Sahami and Hassib, Mariam and Bulling, Andreas},
booktitle = {{Proceedings of the 17th International Conference on Human-Computer Interaction with Mobile Devices and Services}},
title = {{Graphical Passwords in the Wild: Understanding How Users Choose Pictures and Passwords in Image-based Authentication Schemes}},
year = {2015},
address = {New York, NY, USA},
note = {alt2015mobilehci},
pages = {316--322},
publisher = {Association for Computing Machinery},
series = {MobileHCI '15},
abstract = {Common user authentication methods on smartphones, such as lock patterns, PINs, or passwords, impose a trade-off between security and password memorability. Image-based passwords were proposed as a secure and usable alternative. As of today, however, it remains unclear how such schemes are used in the wild. We present the first study to investigate how image-based passwords are used over long periods of time in the real world. Our analyses are based on data from 2318 unique devices collected over more than one year using a custom application released in the Android Play store. We present an in-depth analysis of what kind of images users select, how they define their passwords, and how secure these passwords are. Our findings provide valuable insights into real-world use of image-based passwords and inform the design of future graphical authentication schemes.},
acmid = {2785882},
doi = {10.1145/2785830.2785882},
isbn = {978-1-4503-3652-9},
keywords = {Graphical passwords, images, security},
location = {Copenhagen, Denmark},
numpages = {7},
timestamp = {2015.08.24},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2015mobilehci.pdf},
}
D. Buschek, A. De Luca, and F. Alt. There is More to Typing Than Speed: Expressive Mobile Touch Keyboards via Dynamic Font Personalisation. In Proceedings of the 17th International Conference on Human-Computer Interaction with Mobile Devices and Services (MobileHCI ’15), Association for Computing Machinery, New York, NY, USA, 2015, p. 125–130. doi:10.1145/2785830.2785844
[BibTeX] [Abstract] [PDF]
Typing is a common task on mobile devices and has been widely addressed in HCI research, mostly regarding quantitative factors such as error rates and speed. Qualitative aspects, like personal expressiveness, have received less attention. This paper makes individual typing behaviour visible to the users to render mobile typing more personal and expressive in varying contexts: We introduce a dynamic font personalisation framework, TapScript, which adapts a finger-drawn font according to user behaviour and context, such as finger placement, device orientation and movements – resulting in a handwritten-looking font. We implemented TapScript for evaluation with an online survey (N=91) and a field study with a chat app (N=11). Looking at resulting fonts, survey participants distinguished pairs of typists with 84.5% accuracy and walking/sitting with 94.8%. Study participants perceived fonts as individual and the chat experience as personal. They also made creative explicit use of font adaptations.
@InProceedings{buschek2015mobilehci,
author = {Buschek, Daniel and De Luca, Alexander and Alt, Florian},
booktitle = {{Proceedings of the 17th International Conference on Human-Computer Interaction with Mobile Devices and Services}},
title = {{There is More to Typing Than Speed: Expressive Mobile Touch Keyboards via Dynamic Font Personalisation}},
year = {2015},
address = {New York, NY, USA},
note = {buschek2015mobilehci},
pages = {125--130},
publisher = {Association for Computing Machinery},
series = {MobileHCI '15},
abstract = {Typing is a common task on mobile devices and has been widely addressed in HCI research, mostly regarding quantitative factors such as error rates and speed. Qualitative aspects, like personal expressiveness, have received less attention. This paper makes individual typing behaviour visible to the users to render mobile typing more personal and expressive in varying contexts: We introduce a dynamic font personalisation framework, TapScript, which adapts a finger-drawn font according to user behaviour and context, such as finger placement, device orientation and movements - resulting in a handwritten-looking font. We implemented TapScript for evaluation with an online survey (N=91) and a field study with a chat app (N=11). Looking at resulting fonts, survey participants distinguished pairs of typists with 84.5% accuracy and walking/sitting with 94.8%. Study participants perceived fonts as individual and the chat experience as personal. They also made creative explicit use of font adaptations.},
acmid = {2785844},
doi = {10.1145/2785830.2785844},
isbn = {978-1-4503-3652-9},
keywords = {Font Personalisation, Mobile, Touch Typing},
location = {Copenhagen, Denmark},
numpages = {6},
timestamp = {2015.08.23},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2015mobilehci.pdf},
}
D. Buschek, A. Auch, and F. Alt. A Toolkit for Analysis and Prediction of Touch Targeting Behaviour on Mobile Websites. In Proceedings of the 7th ACM SIGCHI Symposium on Engineering Interactive Computing Systems (EICS ’15), Association for Computing Machinery, New York, NY, USA, 2015, p. 54–63. doi:10.1145/2774225.2774851
[BibTeX] [Abstract] [PDF]
Touch interaction on mobile devices suffers from several problems, such as the thumb’s limited reach or the occlusion of targets by the finger. This leads to offsets between the user’s intended touch location and the actual location sensed by the device. Recent research has modelled such offset patterns to analyse and predict touch targeting behaviour. However, these models have only been applied in lab experiments for specific tasks (typing, pointing, targeting games). In contrast, their applications to websites are yet unexplored. To close this gap, this paper explores the potential of touch modelling for the mobile web: We present a toolkit which allows web developers to collect and analyse touch interactions with their websites. Our system can learn about users’ targeting patterns to simulate expected touch interactions and help identify potential usability issues for future versions of the website prior to deployment. We train models on data collected in a field experiment with 50 participants in a shopping scenario. Our analyses show that the resulting models capture interesting behavioural patterns, reveal insights into user-specific behaviour, and enable predictions of expected error rates for individual interface elements.
@InProceedings{buschek2015eics,
author = {Buschek, Daniel and Auch, Alexander and Alt, Florian},
booktitle = {{Proceedings of the 7th ACM SIGCHI Symposium on Engineering Interactive Computing Systems}},
title = {{A Toolkit for Analysis and Prediction of Touch Targeting Behaviour on Mobile Websites}},
year = {2015},
address = {New York, NY, USA},
note = {buschek2015eics},
pages = {54--63},
publisher = {Association for Computing Machinery},
series = {EICS '15},
abstract = {Touch interaction on mobile devices suffers from several problems, such as the thumb's limited reach or the occlusion of targets by the finger. This leads to offsets between the user's intended touch location and the actual location sensed by the device. Recent research has modelled such offset patterns to analyse and predict touch targeting behaviour. However, these models have only been applied in lab experiments for specific tasks (typing, pointing, targeting games). In contrast, their applications to websites are yet unexplored. To close this gap, this paper explores the potential of touch modelling for the mobile web: We present a toolkit which allows web developers to collect and analyse touch interactions with their websites. Our system can learn about users' targeting patterns to simulate expected touch interactions and help identify potential usability issues for future versions of the website prior to deployment. We train models on data collected in a field experiment with 50 participants in a shopping scenario. Our analyses show that the resulting models capture interesting behavioural patterns, reveal insights into user-specific behaviour, and enable predictions of expected error rates for individual interface elements.},
acmid = {2774851},
doi = {10.1145/2774225.2774851},
isbn = {978-1-4503-3646-8},
keywords = {mobile, targeting, toolkit, touch, user model, web},
location = {Duisburg, Germany},
numpages = {10},
timestamp = {2015.06.23},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2015eics.pdf},
}
N. Memarovic, S. Clinch, and F. Alt. Understanding Display Blindness in Future Display Deployments. In Proceedings of the 2015 ACM International Symposium on Pervasive Displays (PerDis ’15), Association for Computing Machinery, New York, NY, USA, 2015, p. 7–14. doi:10.1145/2757710.2757719
[BibTeX] [Abstract] [PDF]
Digital displays are heralded as a transformative medium for communication. However, a known challenge in the domain is that of display blindness in which passersby pay little or no attention to public displays. This phenomenon has been a major motivation for much of the research on public displays. However, since the early observations, little has been done to develop our understanding of display blindness – for example, to identify determining factors or propose appropriate metrics. Hence, the degree to which developments in signage form, content, and interaction address display blindness remains unclear. In this paper we examine and categorize current approaches to studying and addressing display blindness. Based on our analysis we identify open questions in the research space, including the impact of display physicality and audience differences, relationships with other observed effects, the impact of research interventions, and selection of appropriate metrics. The goal of this paper is to start a discussion within the community on the topic, and to inform the design of future research.
@InProceedings{memarovic2015perdis,
author = {Memarovic, Nemanja and Clinch, Sarah and Alt, Florian},
booktitle = {{Proceedings of the 2015 ACM International Symposium on Pervasive Displays}},
title = {{Understanding Display Blindness in Future Display Deployments}},
year = {2015},
address = {New York, NY, USA},
note = {memarovic2015perdis},
pages = {7--14},
publisher = {Association for Computing Machinery},
series = {PerDis '15},
abstract = {Digital displays are heralded as a transformative medium for communication. However, a known challenge in the domain is that of display blindness in which passersby pay little or no attention to public displays. This phenomenon has been a major motivation for much of the research on public displays. However, since the early observations, little has been done to develop our understanding of display blindness -- for example, to identify determining factors or propose appropriate metrics. Hence, the degree to which developments in signage form, content, and interaction address display blindness remains unclear. In this paper we examine and categorize current approaches to studying and addressing display blindness. Based on our analysis we identify open questions in the research space, including the impact of display physicality and audience differences, relationships with other observed effects, the impact of research interventions, and selection of appropriate metrics. The goal of this paper is to start a discussion within the community on the topic, and to inform the design of future research.},
acmid = {2757719},
doi = {10.1145/2757710.2757719},
isbn = {978-1-4503-3608-6},
keywords = {Display blindness, interaction blindness, public displays},
location = {Saarbruecken, Germany},
numpages = {8},
timestamp = {2015.06.08},
url = {http://www.florian-alt.org/unibw/wp-content/publications/memarovic2015perdis.pdf},
}
T. Dingler, M. Funk, and F. Alt. Interaction Proxemics: Combining Physical Spaces for Seamless Gesture Interaction. In Proceedings of the 2015 ACM International Symposium on Pervasive Displays (PerDis ’15), Association for Computing Machinery, New York, NY, USA, 2015, p. 107–114. doi:10.1145/2757710.2757722
[BibTeX] [Abstract] [PDF]
Touch and gesture input have become popular for display interaction. While applications usually focus on one particular input technology, we set out to adjust the interaction modality based on the proximity of users to the screen. Therefore, we built a system which combines technology-transparent interaction spaces across 4 interaction zones: touch, fine-grained, general, and coarse gestures. In a user study, participants performed a pointing task within and across these zones. Results show that zone transitions are most feasible up to 2m from the screen. Hence, applications can map functionality across different interaction zones, thereby providing additional interaction dimensions and decreasing the complexity of the gesture set. We collected subjective feedback and present a user-defined gesture set for performing a series of standard tasks across different interaction zones. Seamless transition between these spaces is essential to create a consistent interaction experience; finally, we discuss characteristics of systems that take into account user proxemics as input modality.
@InProceedings{dingler2015perdis,
author = {Dingler, Tilman and Funk, Markus and Alt, Florian},
booktitle = {{Proceedings of the 2015 ACM International Symposium on Pervasive Displays}},
title = {{Interaction Proxemics: Combining Physical Spaces for Seamless Gesture Interaction}},
year = {2015},
address = {New York, NY, USA},
note = {dingler2015perdis},
pages = {107--114},
publisher = {Association for Computing Machinery},
series = {PerDis '15},
abstract = {Touch and gesture input have become popular for display interaction. While applications usually focus on one particular input technology, we set out to adjust the interaction modality based on the proximity of users to the screen. Therefore, we built a system which combines technology-transparent interaction spaces across 4 interaction zones: touch, fine-grained, general, and coarse gestures. In a user study, participants performed a pointing task within and across these zones. Results show that zone transitions are most feasible up to 2m from the screen. Hence, applications can map functionality across different interaction zones, thereby providing additional interaction dimensions and decreasing the complexity of the gesture set. We collected subjective feedback and present a user-defined gesture set for performing a series of standard tasks across different interaction zones. Seamless transition between these spaces is essential to create a consistent interaction experience; finally, we discuss characteristics of systems that take into account user proxemics as input modality.},
acmid = {2757722},
doi = {10.1145/2757710.2757722},
isbn = {978-1-4503-3608-6},
keywords = {Interaction, distance, gestures, proxemics},
location = {Saarbruecken, Germany},
numpages = {8},
timestamp = {2015.06.08},
url = {http://www.florian-alt.org/unibw/wp-content/publications/dingler2015perdis.pdf},
}
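The proximity-dependent switching described in the abstract above boils down to mapping a tracked distance onto one of four interaction zones. The Python sketch below illustrates this; the threshold values are assumptions invented for the example, since the paper only reports that zone transitions are most feasible up to 2 m from the screen.

def interaction_zone(distance_m: float) -> str:
    """Map a user's distance from the screen to an interaction zone.
    Thresholds are illustrative assumptions, not values from the paper."""
    if distance_m < 0.6:
        return "touch"
    elif distance_m < 1.2:
        return "fine-grained gestures"
    elif distance_m < 2.0:
        return "general gestures"
    return "coarse gestures"

for d in (0.3, 1.0, 1.8, 3.5):
    print("%.1f m -> %s" % (d, interaction_zone(d)))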
A. Colley, L. Ventä-Olkkonen, F. Alt, and J. Häkkilä. Insights from Deploying See-Through Augmented Reality Signage in the Wild. In Proceedings of the 2015 ACM International Symposium on Pervasive Displays (PerDis ’15), Association for Computing Machinery, New York, NY, USA, 2015, p. 179–185. doi:10.1145/2757710.2757730
[BibTeX] [Abstract] [PDF]
Typically, the key challenges with interactive digital signage are (1) interaction times are short (usually in the order of seconds), (2) interaction needs to be very easy to understand, and (3) interaction needs to provide a benefit that justifies the effort to engage. To tackle these challenges, we propose a see-through augmented reality application for digital signage that enables passersby to observe the area behind the display, augmented with useful data. We report on the development and deployment of our application in two public settings: a public library and a supermarket. Based on observations of 261 (library) and 661 (supermarket) passersby and 14 interviews, we provide early insights and implications for application designers. Our results show a significant increase in attention: the see-through signage was noticed by 46% of the people, compared to 14% with the non-see-through version. Furthermore, findings indicate that to best benefit the passersby, the AR displays should clearly communicate their purpose.
@InProceedings{colley2015perdis,
author = {Colley, Ashley and Vent\"{a}-Olkkonen, Leena and Alt, Florian and H\"{a}kkil\"{a}, Jonna},
booktitle = {{Proceedings of the 2015 ACM International Symposium on Pervasive Displays}},
title = {{Insights from Deploying See-Through Augmented Reality Signage in the Wild}},
year = {2015},
address = {New York, NY, USA},
note = {colley2015perdis},
pages = {179--185},
publisher = {Association for Computing Machinery},
series = {PerDis '15},
abstract = {Typically, the key challenges with interactive digital signage are (1) interaction times are short (usually in the order of seconds), (2) interaction needs to be very easy to understand, and (3) interaction needs to provide a benefit that justifies the effort to engage. To tackle these challenges, we propose a see-through augmented reality application for digital signage that enables passersby to observe the area behind the display, augmented with useful data. We report on the development and deployment of our application in two public settings: a public library and a supermarket. Based on observations of 261 (library) and 661 (supermarket) passersby and 14 interviews, we provide early insights and implications for application designers. Our results show a significant increase in attention: the see-through signage was noticed by 46% of the people, compared to 14% with the non-see-through version. Furthermore, findings indicate that to best benefit the passersby, the AR displays should clearly communicate their purpose.},
acmid = {2757730},
doi = {10.1145/2757710.2757730},
isbn = {978-1-4503-3608-6},
keywords = {AR, attention, digital signage, interaction, public displays},
location = {Saarbruecken, Germany},
numpages = {7},
timestamp = {2015.06.08},
url = {http://www.florian-alt.org/unibw/wp-content/publications/colley2015perdis.pdf},
}
D. Buschek, A. De Luca, and F. Alt. Improving Accuracy, Applicability and Usability of Keystroke Biometrics on Mobile Touchscreen Devices. In Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems (CHI ’15), Association for Computing Machinery, New York, NY, USA, 2015, p. 1393–1402. doi:10.1145/2702123.2702252
[BibTeX] [Abstract] [PDF]
Authentication methods can be improved by considering implicit, individual behavioural cues. In particular, verifying users based on typing behaviour has been widely studied with physical keyboards. On mobile touchscreens, the same concepts have been applied with few adaptations so far. This paper presents the first reported study on mobile keystroke biometrics which compares touch-specific features between three different hand postures and evaluation schemes. Based on 20,160 password entries from a study with 28 participants over two weeks, we show that including spatial touch features reduces implicit authentication equal error rates (EER) by 26.4–36.8% relative to the previously used temporal features. We also show that authentication works better for some hand postures than others. To improve applicability and usability, we further quantify the influence of common evaluation assumptions: known attacker data, training and testing on data from a single typing session, and fixed hand postures. We show that these practices can lead to overly optimistic evaluations. In consequence, we describe evaluation recommendations, a probabilistic framework to handle unknown hand postures, and ideas for further improvements.
@InProceedings{buschek2015chi,
author = {Buschek, Daniel and De Luca, Alexander and Alt, Florian},
booktitle = {{Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems}},
title = {{Improving Accuracy, Applicability and Usability of Keystroke Biometrics on Mobile Touchscreen Devices}},
year = {2015},
address = {New York, NY, USA},
note = {buschek2015chi},
pages = {1393--1402},
publisher = {Association for Computing Machinery},
series = {CHI '15},
abstract = {Authentication methods can be improved by considering implicit, individual behavioural cues. In particular, verifying users based on typing behaviour has been widely studied with physical keyboards. On mobile touchscreens, the same concepts have been applied with few adaptations so far. This paper presents the first reported study on mobile keystroke biometrics which compares touch-specific features between three different hand postures and evaluation schemes. Based on 20,160 password entries from a study with 28 participants over two weeks, we show that including spatial touch features reduces implicit authentication equal error rates (EER) by 26.4–36.8% relative to the previously used temporal features. We also show that authentication works better for some hand postures than others. To improve applicability and usability, we further quantify the influence of common evaluation assumptions: known attacker data, training and testing on data from a single typing session, and fixed hand postures. We show that these practices can lead to overly optimistic evaluations. In consequence, we describe evaluation recommendations, a probabilistic framework to handle unknown hand postures, and ideas for further improvements.},
acmid = {2702252},
doi = {10.1145/2702123.2702252},
isbn = {978-1-4503-3145-6},
keywords = {biometrics, keystroke dynamics, mobile, touch},
location = {Seoul, Republic of Korea},
numpages = {10},
timestamp = {2015.04.28},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2015chi.pdf},
}
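The abstract above reports results as equal error rates (EER), the standard operating point at which false rejections equal false acceptances. The sketch below shows one common way to compute an EER from genuine and impostor similarity scores; the synthetic scores and the simple threshold sweep are illustrative assumptions, not the paper's evaluation code.

import numpy as np

def equal_error_rate(genuine, impostor):
    """Return the EER: sweep thresholds and find where the false
    rejection rate (FRR) and false acceptance rate (FAR) coincide.
    Higher scores mean 'more likely the legitimate user'."""
    thresholds = np.sort(np.concatenate([genuine, impostor]))
    best_gap, eer = 1.0, 0.5
    for t in thresholds:
        frr = np.mean(genuine < t)    # legitimate user rejected
        far = np.mean(impostor >= t)  # attacker accepted
        if abs(frr - far) < best_gap:
            best_gap, eer = abs(frr - far), (frr + far) / 2.0
    return eer

rng = np.random.default_rng(0)
genuine = rng.normal(0.8, 0.10, 500)   # toy scores, not study data
impostor = rng.normal(0.5, 0.15, 500)
print("EER = %.3f" % equal_error_rate(genuine, impostor))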
M. Pfeiffer, T. Dünte, S. Schneegass, F. Alt, and M. Rohs. Cruise Control for Pedestrians: Controlling Walking Direction Using Electrical Muscle Stimulation. In Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems (CHI ’15), Association for Computing Machinery, New York, NY, USA, 2015, p. 2505–2514. doi:10.1145/2702123.2702190
[BibTeX] [Abstract] [PDF]
Pedestrian navigation systems require users to perceive, interpret, and react to navigation information. This can tax cognition as navigation information competes with information from the real world. We propose actuated navigation, a new kind of pedestrian navigation in which the user does not need to attend to the navigation task at all. An actuation signal is directly sent to the human motor system to influence walking direction. To achieve this goal we stimulate the sartorius muscle using electrical muscle stimulation. The rotation occurs during the swing phase of the leg and can easily be counteracted. The user therefore stays in control. We discuss the properties of actuated navigation and present a lab study on identifying basic parameters of the technique as well as an outdoor study in a park. The results show that our approach changes a user’s walking direction by about 16°/m on average and that the system can successfully steer users in a park with crowded areas, distractions, obstacles, and uneven ground.
@InProceedings{pfeiffer2015chi,
author = {Pfeiffer, Max and D\"{u}nte, Tim and Schneegass, Stefan and Alt, Florian and Rohs, Michael},
booktitle = {{Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems}},
title = {{Cruise Control for Pedestrians: Controlling Walking Direction Using Electrical Muscle Stimulation}},
year = {2015},
address = {New York, NY, USA},
note = {pfeiffer2015chi},
pages = {2505--2514},
publisher = {Association for Computing Machinery},
series = {CHI '15},
abstract = {Pedestrian navigation systems require users to perceive, interpret, and react to navigation information. This can tax cognition as navigation information competes with information from the real world. We propose actuated navigation, a new kind of pedestrian navigation in which the user does not need to attend to the navigation task at all. An actuation signal is directly sent to the human motor system to influence walking direction. To achieve this goal we stimulate the sartorius muscle using electrical muscle stimulation. The rotation occurs during the swing phase of the leg and can easily be counteracted. The user therefore stays in control. We discuss the properties of actuated navigation and present a lab study on identifying basic parameters of the technique as well as an outdoor study in a park. The results show that our approach changes a user's walking direction by about 16°/m on average and that the system can successfully steer users in a park with crowded areas, distractions, obstacles, and uneven ground.},
acmid = {2702190},
doi = {10.1145/2702123.2702190},
isbn = {978-1-4503-3145-6},
keywords = {actuated navigation, electrical muscle stimulation, haptic feedback, pedestrian navigation, wearable devices},
location = {Seoul, Republic of Korea},
numpages = {10},
timestamp = {2015.04.28},
url = {http://www.florian-alt.org/unibw/wp-content/publications/pfeiffer2015chi.pdf},
}
D. Buschek, M. Spitzer, and F. Alt. Video-Recording Your Life: User Perception and Experiences. In Proceedings of the 33rd Annual ACM Conference Extended Abstracts on Human Factors in Computing Systems (CHI EA ’15), Association for Computing Machinery, New York, NY, USA, 2015, p. 2223–2228. doi:10.1145/2702613.2732743
[BibTeX] [Abstract] [PDF]
Video recording is becoming an integral part of our daily activities: Action cams and wearable cameras allow us to capture scenes of our daily life effortlessly. This trend generates vast amounts of video material impossible to review manually. However, these recordings also contain a lot of information potentially interesting to the recording individual and to others. Such videos can provide a meaningful summary of the day, serving as a digital extension to the user’s human memory. They might also be interesting to others as tutorials (e.g. how to change a flat tyre). As a first step towards this vision, we present a survey assessing the users’ view and their video recording behavior. Findings were used to inform the design of a prototype based on off-the-shelf components, which allows users to create meaningful video clips of their daily activities in an automated manner by using their phone and any wearable camera. We conclude with a preliminary, qualitative study showing the feasibility and potential of the approach and sketch future research directions.
@InProceedings{buschek2015chiea,
author = {Buschek, Daniel and Spitzer, Michael and Alt, Florian},
booktitle = {{Proceedings of the 33rd Annual ACM Conference Extended Abstracts on Human Factors in Computing Systems}},
title = {{Video-Recording Your Life: User Perception and Experiences}},
year = {2015},
address = {New York, NY, USA},
note = {buschek2015chiea},
pages = {2223--2228},
publisher = {Association for Computing Machinery},
series = {CHI EA '15},
abstract = {Video recording is becoming an integral part of our daily activities: Action cams and wearable cameras allow us to capture scenes of our daily life effortlessly. This trend generates vast amounts of video material impossible to review manually. However, these recordings also contain a lot of information potentially interesting to the recording individual and to others. Such videos can provide a meaningful summary of the day, serving as a digital extension to the user's human memory. They might also be interesting to others as tutorials (e.g. how to change a flat tyre). As a first step towards this vision, we present a survey assessing the users' view and their video recording behavior. Findings were used to inform the design of a prototype based on off-the-shelf components, which allows users to create meaningful video clips of their daily activities in an automated manner by using their phone and any wearable camera. We conclude with a preliminary, qualitative study showing the feasibility and potential of the approach and sketch future research directions.},
acmid = {2732743},
doi = {10.1145/2702613.2732743},
isbn = {978-1-4503-3146-3},
keywords = {context, life logging, smartphone, video recording},
location = {Seoul, Republic of Korea},
numpages = {6},
timestamp = {2015.04.27},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2015chiea.pdf},
}
N. Broy, S. Schneegass, M. Guo, F. Alt, and A. Schmidt. Evaluating Stereoscopic 3D for Automotive User Interfaces in a Real-World Driving Study. In Proceedings of the 33rd Annual ACM Conference Extended Abstracts on Human Factors in Computing Systems (CHI EA ’15), Association for Computing Machinery, New York, NY, USA, 2015, p. 1717–1722. doi:10.1145/2702613.2732902
[BibTeX] [Abstract] [PDF]
This paper reports on the use of in-car 3D displays in a real-world driving scenario. Today, stereoscopic displays are becoming ubiquitous in many domains such as mobile phones or TVs. Instead of using 3D for entertainment, we explore the 3D effect as a mean to spatially structure user interface (UI) elements. To evaluate potentials and drawbacks of in-car 3D displays we mounted an autostereoscopic display as instrument cluster in a vehicle and conducted a real-world driving study with 15 experts in automotive UI design. The results show that the 3D effect increases the perceived quality of the UI and enhances the presentation of spatial information (e.g., navigation cues) compared to 2D. However, the effect should be used well-considered to avoid spatial clutter which can increase the system’s complexity.
@InProceedings{broy2015chiea,
author = {Broy, Nora and Schneegass, Stefan and Guo, Mengbing and Alt, Florian and Schmidt, Albrecht},
booktitle = {{Proceedings of the 33rd Annual ACM Conference Extended Abstracts on Human Factors in Computing Systems}},
title = {{Evaluating Stereoscopic 3D for Automotive User Interfaces in a Real-World Driving Study}},
year = {2015},
address = {New York, NY, USA},
note = {broy2015chiea},
pages = {1717--1722},
publisher = {Association for Computing Machinery},
series = {CHI EA '15},
abstract = {This paper reports on the use of in-car 3D displays in a real-world driving scenario. Today, stereoscopic displays are becoming ubiquitous in many domains such as mobile phones or TVs. Instead of using 3D for entertainment, we explore the 3D effect as a mean to spatially structure user interface (UI) elements. To evaluate potentials and drawbacks of in-car 3D displays we mounted an autostereoscopic display as instrument cluster in a vehicle and conducted a real-world driving study with 15 experts in automotive UI design. The results show that the 3D effect increases the perceived quality of the UI and enhances the presentation of spatial information (e.g., navigation cues) compared to 2D. However, the effect should be used well-considered to avoid spatial clutter which can increase the system's complexity.},
acmid = {2732902},
doi = {10.1145/2702613.2732902},
isbn = {978-1-4503-3146-3},
keywords = {automotive user interfaces, stereoscopic 3D},
location = {Seoul, Republic of Korea},
numpages = {6},
timestamp = {2015.04.27},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2015chiea.pdf},
}
D. Buschek and F. Alt. TouchML: A Machine Learning Toolkit for Modelling Spatial Touch Targeting Behaviour. In Proceedings of the 20th International Conference on Intelligent User Interfaces (IUI ’15), Association for Computing Machinery, New York, NY, USA, 2015, p. 110–114. doi:10.1145/2678025.2701381
[BibTeX] [Abstract] [PDF]
Pointing tasks are commonly studied in HCI research, for example to evaluate and compare different interaction techniques or devices. A recent line of work has modelled user-specific touch behaviour with machine learning methods to reveal spatial targeting error patterns across the screen. These models can also be applied to improve accuracy of touchscreens and keyboards, and to recognise users and hand postures. However, no implementation of these techniques has been made publicly available yet, hindering broader use in research and practical deployments. Therefore, this paper presents a toolkit which implements such touch models for data analysis (Python), mobile applications (Java/Android), and the web (JavaScript). We demonstrate several applications, including hand posture recognition, on touch targeting data collected in a study with 24 participants. We consider different target types and hand postures, changing behaviour over time, and the influence of hand sizes.
@InProceedings{buschek2015iui,
author = {Buschek, Daniel and Alt, Florian},
booktitle = {{Proceedings of the 20th International Conference on Intelligent User Interfaces}},
title = {{TouchML: A Machine Learning Toolkit for Modelling Spatial Touch Targeting Behaviour}},
year = {2015},
address = {New York, NY, USA},
note = {buschek2015iui},
pages = {110--114},
publisher = {Association for Computing Machinery},
series = {IUI '15},
abstract = {Pointing tasks are commonly studied in HCI research, for example to evaluate and compare different interaction techniques or devices. A recent line of work has modelled user-specific touch behaviour with machine learning methods to reveal spatial targeting error patterns across the screen. These models can also be applied to improve accuracy of touchscreens and keyboards, and to recognise users and hand postures. However, no implementation of these techniques has been made publicly available yet, hindering broader use in research and practical deployments. Therefore, this paper presents a toolkit which implements such touch models for data analysis (Python), mobile applications (Java/Android), and the web (JavaScript). We demonstrate several applications, including hand posture recognition, on touch targeting data collected in a study with 24 participants. We consider different target types and hand postures, changing behaviour over time, and the influence of hand sizes.},
acmid = {2701381},
doi = {10.1145/2678025.2701381},
isbn = {978-1-4503-3306-1},
keywords = {gaussian process, machine learning, toolkit, touch},
location = {Atlanta, Georgia, USA},
numpages = {5},
timestamp = {2015.03.29},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2015iui.pdf},
}
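The touch models the abstract above refers to learn a smooth, user-specific offset field over the screen. The sketch below approximates that idea with scikit-learn's GaussianProcessRegressor and invented toy coordinates; it is a stand-in for illustration, not TouchML's actual API.

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

# Toy data in normalized screen coordinates (invented for this example):
# intended target positions and the touches a user actually produced.
targets = np.array([[0.2, 0.3], [0.5, 0.5], [0.8, 0.7], [0.3, 0.8], [0.7, 0.2]])
touches = targets + np.array([[0.02, -0.01], [0.03, 0.00], [0.01, -0.02],
                              [0.02, -0.01], [0.04, 0.01]])

# Learn a smooth offset field: sensed position -> (dx, dy) correction.
offsets = targets - touches
gp = GaussianProcessRegressor(kernel=RBF(0.3) + WhiteKernel(1e-4), normalize_y=True)
gp.fit(touches, offsets)

# Correct a new sensed touch by its predicted offset.
new_touch = np.array([[0.6, 0.6]])
print(new_touch + gp.predict(new_touch))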
S. Gehring, A. Krüger, F. Alt, N. Taylor, and S. Schneegass (eds.), Proceedings of the 4th International Symposium on Pervasive Displays (PerDis ’15), New York, NY, USA: Association for Computing Machinery, 2015.
[BibTeX] [PDF]
@Proceedings{gehring2015perdis,
title = {{Proceedings of the 4th International Symposium on Pervasive Displays}},
year = {2015},
address = {New York, NY, USA},
editor = {Sven Gehring AND Antonio Krüger AND Florian Alt AND Nick Taylor AND Stefan Schneegass},
isbn = {978-1-4503-3608-6},
note = {gehring2015perdis},
publisher = {Association for Computing Machinery},
series = {PerDis '15},
location = {Saarbruecken, Germany},
timestamp = {2015.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/gehring2015perdis.pdf},
}

2014

S. Schneegass, F. Steimle, A. Bulling, F. Alt, and A. Schmidt. SmudgeSafe: Geometric Image Transformations for Smudge-resistant User Authentication. In Proceedings of the 2014 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp ’14), Association for Computing Machinery, New York, NY, USA, 2014, p. 775–786. doi:10.1145/2632048.2636090
[BibTeX] [Abstract] [PDF]
Touch-enabled user interfaces have become ubiquitous, such as on ATMs or portable devices. At the same time, authentication using touch input is problematic, since finger smudge traces may allow attackers to reconstruct passwords. We present SmudgeSafe, an authentication system that uses random geometric image transformations, such as translation, rotation, scaling, shearing, and flipping, to increase the security of cued-recall graphical passwords. We describe the design space of these transformations and report on two user studies: A lab-based security study involving 20 participants in attacking user-defined passwords, using high quality pictures of real smudge traces captured on a mobile phone display; and an in-the-field usability study with 374 participants who generated more than 130,000 logins on a mobile phone implementation of SmudgeSafe. Results show that SmudgeSafe significantly increases security compared to authentication schemes based on PINs and lock patterns, and exhibits very high learnability, efficiency, and memorability.
@InProceedings{schneegass2014ubicomp,
author = {Schneegass, Stefan and Steimle, Frank and Bulling, Andreas and Alt, Florian and Schmidt, Albrecht},
booktitle = {{Proceedings of the 2014 ACM International Joint Conference on Pervasive and Ubiquitous Computing}},
title = {{SmudgeSafe: Geometric Image Transformations for Smudge-resistant User Authentication}},
year = {2014},
address = {New York, NY, USA},
note = {schneegass2014ubicomp},
pages = {775--786},
publisher = {Association for Computing Machinery},
series = {UbiComp '14},
abstract = {Touch-enabled user interfaces have become ubiquitous, such as on ATMs or portable devices. At the same time, authentication using touch input is problematic, since finger smudge traces may allow attackers to reconstruct passwords. We present SmudgeSafe, an authentication system that uses random geometric image transformations, such as translation, rotation, scaling, shearing, and flipping, to increase the security of cued-recall graphical passwords. We describe the design space of these transformations and report on two user studies: A lab-based security study involving 20 participants in attacking user-defined passwords, using high quality pictures of real smudge traces captured on a mobile phone display; and an in-the-field usability study with 374 participants who generated more than 130,000 logins on a mobile phone implementation of SmudgeSafe. Results show that SmudgeSafe significantly increases security compared to authentication schemes based on PINs and lock patterns, and exhibits very high learnability, efficiency, and memorability.},
acmid = {2636090},
doi = {10.1145/2632048.2636090},
isbn = {978-1-4503-2968-2},
keywords = {finger smudge traces, graphical passwords, touch input},
location = {Seattle, Washington},
numpages = {12},
timestamp = {2014.09.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schneegass2014ubicomp.pdf},
}
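As an illustration of the transformation classes the abstract above lists, the following Pillow sketch applies a random combination of flipping, rotation, scaling, and translation to a password image before it is shown. All parameter ranges are assumptions invented for the example (shearing is omitted for brevity), and this is not the study's implementation.

import random
from PIL import Image

def randomize_password_image(img: Image.Image) -> Image.Image:
    """Apply random geometric transformations so that smudge traces from
    a previous login no longer line up with the displayed image."""
    if random.random() < 0.5:
        img = img.transpose(Image.FLIP_LEFT_RIGHT)          # flip
    img = img.rotate(random.uniform(-30, 30))               # rotate
    scale = random.uniform(0.8, 1.2)                        # scale
    img = img.resize((int(img.width * scale), int(img.height * scale)))
    dx, dy = random.randint(-20, 20), random.randint(-20, 20)
    canvas = Image.new("RGB", (img.width + 40, img.height + 40))
    canvas.paste(img, (20 + dx, 20 + dy))                   # translate
    return canvas

# Example (hypothetical file name):
# transformed = randomize_password_image(Image.open("password_image.png"))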
N. Broy, F. Alt, S. Schneegass, and B. Pfleging. 3D Displays in Cars: Exploring the User Performance for a Stereoscopic Instrument Cluster. In Proceedings of the 6th International Conference on Automotive User Interfaces and Interactive Vehicular Applications (AutomotiveUI ’14), Association for Computing Machinery, New York, NY, USA, 2014, p. 2:1–2:9. doi:10.1145/2667317.2667319
[BibTeX] [Abstract] [PDF]
In this paper, we investigate user performance for stereoscopic automotive user interfaces (UI). Our work is motivated by the fact that stereoscopic displays are about to find their way into cars. Such a safety-critical application area creates an inherent need to understand how the use of stereoscopic 3D visualizations impacts user performance. We conducted a comprehensive study with 56 participants to investigate the impact of a 3D instrument cluster (IC) on primary and secondary task performance. We investigated different visualizations (2D and 3D) and complexities (low vs. high amount of details) of the IC as well as two 3D display technologies (shutter vs. autostereoscopy). As secondary tasks the participants judged spatial relations between UI elements (expected events) and reacted on pop-up instructions (unexpected events) in the IC. The results show that stereoscopy increases accuracy for expected events, decreases task completion times for unexpected tasks, and increases the attractiveness of the interface. Furthermore, we found a significant influence of the used technology, indicating that secondary task performance improves for shutter displays.
@InProceedings{broy2014autoui,
author = {Broy, Nora and Alt, Florian and Schneegass, Stefan and Pfleging, Bastian},
booktitle = {{Proceedings of the 6th International Conference on Automotive User Interfaces and Interactive Vehicular Applications}},
title = {{3D Displays in Cars: Exploring the User Performance for a Stereoscopic Instrument Cluster}},
year = {2014},
address = {New York, NY, USA},
note = {broy2014autoui},
pages = {2:1--2:9},
publisher = {Association for Computing Machinery},
series = {AutomotiveUI '14},
abstract = {In this paper, we investigate user performance for stereoscopic automotive user interfaces (UI). Our work is motivated by the fact that stereoscopic displays are about to find their way into cars. Such a safety-critical application area creates an inherent need to understand how the use of stereoscopic 3D visualizations impacts user performance. We conducted a comprehensive study with 56 participants to investigate the impact of a 3D instrument cluster (IC) on primary and secondary task performance. We investigated different visualizations (2D and 3D) and complexities (low vs. high amount of details) of the IC as well as two 3D display technologies (shutter vs. autostereoscopy). As secondary tasks the participants judged spatial relations between UI elements (expected events) and reacted on pop-up instructions (unexpected events) in the IC. The results show that stereoscopy increases accuracy for expected events, decreases task completion times for unexpected tasks, and increases the attractiveness of the interface. Furthermore, we found a significant influence of the used technology, indicating that secondary task performance improves for shutter displays.},
acmid = {2667319},
articleno = {2},
doi = {10.1145/2667317.2667319},
isbn = {978-1-4503-3212-5},
keywords = {Automotive UIs, stereoscopic 3D, user performance},
location = {Seattle, WA, USA},
numpages = {9},
timestamp = {2014.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2014autoui.pdf},
}
S. Schneegass, F. Alt, J. Scheible, and A. Schmidt. Midair Displays: Concept and First Experiences with Free-Floating Pervasive Displays. In Proceedings of the 2014 ACM International Symposium on Pervasive Displays (PerDis ’14), Association for Computing Machinery, New York, NY, USA, 2014, p. 27:27–27:31. doi:10.1145/2611009.2611013
[BibTeX] [Abstract] [PDF]
Due to advances in technology, displays could replace literally any surface in the future, including walls, windows, and ceilings. At the same time, midair remains a relatively unexplored domain for the use of displays as of today, particularly in public space. Nevertheless, we see large potential in the ability to make displays appear at any possible point in space, both indoors and outdoors. Such displays, which we call midair displays, could control large crowds in emergency situations, they could be used during sports for navigation and feedback on performance, or as group displays. We see midair displays as a complementary technology to wearable displays. In contrast to statically deployed displays they allow information to be brought to the user anytime and anywhere. We explore the concept of midair displays and show that with current technology, e.g., copter drones, such displays can be easily built. A study on the readability of such displays showcases the potential and feasibility of the concept and provides early insights.
@InProceedings{schneegass2014perdis1,
author = {Schneegass, Stefan and Alt, Florian and Scheible, J\"{u}rgen and Schmidt, Albrecht},
booktitle = {{Proceedings of the 2014 ACM International Symposium on Pervasive Displays}},
title = {{Midair Displays: Concept and First Experiences with Free-Floating Pervasive Displays}},
year = {2014},
address = {New York, NY, USA},
note = {schneegass2014perdis1},
pages = {27:27--27:31},
publisher = {Association for Computing Machinery},
series = {PerDis '14},
abstract = {Due to advances in technology, displays could replace literally any surface in the future, including walls, windows, and ceilings. At the same time, midair remains a relatively unexplored domain for the use of displays as of today, particularly in public space. Nevertheless, we see large potential in the ability to make displays appear at any possible point in space, both indoors and outdoors. Such displays, which we call midair displays, could control large crowds in emergency situations, they could be used during sports for navigation and feedback on performance, or as group displays. We see midair displays as a complementary technology to wearable displays. In contrast to statically deployed displays they allow information to be brought to the user anytime and anywhere. We explore the concept of midair displays and show that with current technology, e.g., copter drones, such displays can be easily built. A study on the readability of such displays showcases the potential and feasibility of the concept and provides early insights.},
acmid = {2611013},
articleno = {27},
doi = {10.1145/2611009.2611013},
isbn = {978-1-4503-2952-1},
keywords = {Drones, Free-Floating Displays, Interaction Techniques, Midair Displays, Pervasive Display},
location = {Copenhagen, Denmark},
numpages = {5},
timestamp = {2014.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schneegass2014perdis1.pdf},
}
F. Steinberger, M. Foth, and F. Alt. Vote With Your Feet: Local Community Polling on Urban Screens. In Proceedings of the 2014 ACM International Symposium on Pervasive Displays (PerDis ’14), Association for Computing Machinery, New York, NY, USA, 2014, p. 44:44–44:49. doi:10.1145/2611009.2611015
[BibTeX] [Abstract] [PDF]
Falling prices have led to an ongoing spread of public displays in urban areas. Still, they mostly show passive content such as commercials and digital signage. At the same time, technological advances have enabled the creation of interactive displays potentially increasing their attractiveness for the audience, e.g. through providing a platform for civic discourse. This poses considerable challenges, since displays need to communicate the opportunity to engage, motivate the audience to do so, and be easy to use. In this paper we present Vote With Your Feet, a hyperlocal public polling tool for urban screens allowing users to express their opinions. Similar to vox populi interviews on TV or polls on news websites, the tool is meant to reflect the mindset of the community on topics such as current affairs, cultural identity and local matters. It is novel in that it focuses on a situated civic discourse and provides a tangible user interface, tackling the mentioned challenges. It shows one Yes/No question at a time and enables users to vote by stepping on one of two tangible buttons on the ground. This user interface was introduced to attract people’s attention and to lower participation barriers. Our field study showed that Vote With Your Feet is perceived as inviting and that it can spark discussions among co-located people.
@InProceedings{steinberger2014perdis,
author = {Steinberger, Fabius and Foth, Marcus and Alt, Florian},
booktitle = {{Proceedings of the 2014 ACM International Symposium on Pervasive Displays}},
title = {{Vote With Your Feet: Local Community Polling on Urban Screens}},
year = {2014},
address = {New York, NY, USA},
note = {steinberger2014perdis},
pages = {44:44--44:49},
publisher = {Association for Computing Machinery},
series = {PerDis '14},
abstract = {Falling prices have led to an ongoing spread of public displays in urban areas. Still, they mostly show passive content such as commercials and digital signage. At the same time, technological advances have enabled the creation of interactive displays potentially increasing their attractiveness for the audience, e.g. through providing a platform for civic discourse. This poses considerable challenges, since displays need to communicate the opportunity to engage, motivate the audience to do so, and be easy to use. In this paper we present Vote With Your Feet, a hyperlocal public polling tool for urban screens allowing users to express their opinions. Similar to vox populi interviews on TV or polls on news websites, the tool is meant to reflect the mindset of the community on topics such as current affairs, cultural identity and local matters. It is novel in that it focuses on a situated civic discourse and provides a tangible user interface, tackling the mentioned challenges. It shows one Yes/No question at a time and enables users to vote by stepping on one of two tangible buttons on the ground. This user interface was introduced to attract people's attention and to lower participation barriers. Our field study showed that Vote With Your Feet is perceived as inviting and that it can spark discussions among co-located people.},
acmid = {2611015},
articleno = {44},
doi = {10.1145/2611009.2611015},
isbn = {978-1-4503-2952-1},
keywords = {Polling, civic engagement, public displays, tangible media, ubiquitous computing, urban computing, urban informatics, voting},
location = {Copenhagen, Denmark},
numpages = {6},
timestamp = {2014.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/steinberger2014perdis.pdf},
}
S. Schneegass and F. Alt. SenScreen: A Toolkit for Supporting Sensor-enabled Multi-Display Networks. In Proceedings of the 2014 ACM International Symposium on Pervasive Displays (PerDis ’14), Association for Computing Machinery, New York, NY, USA, 2014, p. 92:92–92:97. doi:10.1145/2611009.2611017
[BibTeX] [Abstract] [PDF]
Over the past years, a number of sensors have emerged that enable gesture-based interaction with public display applications, including Microsoft Kinect, Asus Xtion, and Leap Motion. In this way, interaction with displays can be made more attractive, particularly if deployed across displays, hence involving many users. However, interactive applications are still scarce, which can be attributed to the fact that developers usually need to implement a low-level connection to the sensor. In this work, we tackle this issue by presenting a toolkit, called SenScreen, consisting of (a) easy-to-install adapters that handle the low-level connection to sensors and provide the data via (b) an API that allows developers to write their applications in JavaScript. We evaluate our approach by letting two groups of developers create an interactive game each using our toolkit. Observations, interviews, and a questionnaire indicate that our toolkit simplifies the implementation of interactive applications and may, hence, serve as a first step towards a more widespread use of interactive public displays.
@InProceedings{schneegass2014perdis2,
author = {Schneegass, Stefan and Alt, Florian},
booktitle = {{Proceedings of the 2014 ACM International Symposium on Pervasive Displays}},
title = {{SenScreen: A Toolkit for Supporting Sensor-enabled Multi-Display Networks}},
year = {2014},
address = {New York, NY, USA},
note = {schneegass2014perdis2},
pages = {92:92--92:97},
publisher = {Association for Computing Machinery},
series = {PerDis '14},
abstract = {Over the past years, a number of sensors have emerged that enable gesture-based interaction with public display applications, including Microsoft Kinect, Asus Xtion, and Leap Motion. In this way, interaction with displays can be made more attractive, particularly if deployed across displays, hence involving many users. However, interactive applications are still scarce, which can be attributed to the fact that developers usually need to implement a low-level connection to the sensor. In this work, we tackle this issue by presenting a toolkit, called SenScreen, consisting of (a) easy-to-install adapters that handle the low-level connection to sensors and provide the data via (b) an API that allows developers to write their applications in JavaScript. We evaluate our approach by letting two groups of developers create an interactive game each using our toolkit. Observations, interviews, and a questionnaire indicate that our toolkit simplifies the implementation of interactive applications and may, hence, serve as a first step towards a more widespread use of interactive public displays.},
acmid = {2611017},
articleno = {92},
doi = {10.1145/2611009.2611017},
isbn = {978-1-4503-2952-1},
keywords = {Interactive Applications, Public Display Architecture, Toolkits},
location = {Copenhagen, Denmark},
numpages = {6},
timestamp = {2014.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schneegass2014perdis2.pdf},
}
N. Broy, S. Höckh, A. Frederiksen, M. Gilowski, J. Eichhorn, F. Naser, H. Jung, J. Niemann, M. Schell, A. Schmid, and F. Alt. Exploring Design Parameters for a 3D Head-Up Display. In Proceedings of the 2014 ACM International Symposium on Pervasive Displays (PerDis ’14), Association for Computing Machinery, New York, NY, USA, 2014, p. 38:38–38:43. doi:10.1145/2611009.2611011
[BibTeX] [Abstract] [PDF]
Today, head-up displays (HUDs) are commonly used in cars to show basic driving information in the visual field of the viewer. This allows information to be perceived in a quick and easy to understand manner. With advances in technology, HUDs will allow richer information to be conveyed to the driver by exploiting the third dimension. We envision a stereoscopic HUD for displaying content in 3D space. This requires an understanding of how parallaxes impact the user’s performance and comfort, which is the focus of this work. In two user studies, involving 49 participants, we (a) gather insights into how projection distances and stereoscopic visualizations influence the comfort zone and (b) the depth judgment of the user. The results show that with larger projection distances both the comfort zone and the minimum comfortable viewing distance increase. Higher distances between the viewer and a real world object to be judged decrease the judgment accuracy.
@InProceedings{broy2014perdis,
author = {Broy, Nora and H\"{o}ckh, Simone and Frederiksen, Annette and Gilowski, Michael and Eichhorn, Julian and Naser, Felix and Jung, Horst and Niemann, Julia and Schell, Martin and Schmid, Albrecht and Alt, Florian},
booktitle = {{Proceedings of the 2014 ACM International Symposium on Pervasive Displays}},
title = {{Exploring Design Parameters for a 3D Head-Up Display}},
year = {2014},
address = {New York, NY, USA},
note = {broy2014perdis},
pages = {38:38--38:43},
publisher = {Association for Computing Machinery},
series = {PerDis '14},
abstract = {Today, head-up displays (HUDs) are commonly used in cars to show basic driving information in the visual field of the viewer. This allows information to be perceived in a quick and easy to understand manner. With advances in technology, HUDs will allow richer information to be conveyed to the driver by exploiting the third dimension. We envision a stereoscopic HUD for displaying content in 3D space. This requires an understanding of how parallaxes impact the user's performance and comfort, which is the focus of this work. In two user studies, involving 49 participants, we (a) gather insights into how projection distances and stereoscopic visualizations influence the comfort zone and (b) the depth judgment of the user. The results show that with larger projection distances both the comfort zone and the minimum comfortable viewing distance increase. Higher distances between the viewer and a real world object to be judged decrease the judgment accuracy.},
acmid = {2611011},
articleno = {38},
doi = {10.1145/2611009.2611011},
isbn = {978-1-4503-2952-1},
keywords = {3D Displays, Automotive UIs, Head-Up Displays, Human Factors},
location = {Copenhagen, Denmark},
numpages = {6},
timestamp = {2014.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2014perdis.pdf},
}
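The comfort-zone and depth-judgment questions studied above hinge on standard stereo geometry: an object rendered at virtual distance Z, viewed on a projection plane at distance D with eye separation e, requires an on-screen parallax of p = e(1 - D/Z). The sketch below encodes this textbook relation as background; it is not a formula taken from the paper.

def screen_parallax_mm(eye_sep_mm: float, screen_dist_m: float,
                       object_dist_m: float) -> float:
    """Horizontal on-screen parallax p = e * (1 - D/Z).
    Positive values place the object behind the projection plane
    (uncrossed disparity), negative values in front (crossed)."""
    return eye_sep_mm * (1.0 - screen_dist_m / object_dist_m)

# Example: 65 mm eye separation, HUD image projected at 2.5 m.
for z in (1.5, 2.5, 10.0, 1e9):
    print("object at %g m -> parallax %.1f mm" % (z, screen_parallax_mm(65.0, 2.5, z)))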
N. Davies, S. Clinch, and F. Alt, Pervasive Displays – Understanding the Future of Digital Signage, Morgan and Claypool Publishers, 2014.
[BibTeX] [Abstract] [PDF]
Fueled by falling display hardware costs and rising demand, digital signage and pervasive displays are becoming ever more ubiquitous. Such systems have traditionally been used for advertising and information dissemination, with digital signage commonplace in shopping malls, airports, and public spaces. While advertising and broadcasting announcements remain important applications, developments in sensing and interaction technologies are enabling entirely new classes of display applications that tailor content to the situation and audience of the display. As a result, signage systems are beginning to transition from simple broadcast systems to rich platforms for communication and interaction. In this lecture we provide an introduction to this emerging field for researchers and practitioners interested in creating state-of-the-art pervasive display systems. We begin by describing the history of pervasive display research, providing illustrations of key systems, from pioneering work on supporting collaboration to contemporary systems designed for personalized information delivery. We then consider what the near future might hold for display networks, describing a series of compelling applications that are being postulated for future display networks. Creating such systems raises a wide range of challenges and requires designers to make a series of important trade-offs. We dedicate four chapters to key aspects of pervasive display design: audience engagement, display interaction, system software, and system evaluation. These chapters provide an overview of current thinking in each area. Finally, we present a series of case studies of display systems and our concluding remarks.
@Book{davies2014synthesis,
author = {Nigel Davies AND Sarah Clinch AND Florian Alt},
publisher = {{Morgan and Claypool Publishers}},
title = {{Pervasive Displays - Understanding the Future of Digital Signage}},
year = {2014},
note = {davies2014synthesis},
series = {Synthesis Lectures},
abstract = {Fueled by falling display hardware costs and rising demand, digital signage and pervasive displays are becoming ever more ubiquitous. Such systems have traditionally been used for advertising and information dissemination, with digital signage commonplace in shopping malls, airports, and public spaces. While advertising and broadcasting announcements remain important applications, developments in sensing and interaction technologies are enabling entirely new classes of display applications that tailor content to the situation and audience of the display. As a result, signage systems are beginning to transition from simple broadcast systems to rich platforms for communication and interaction. In this lecture we provide an introduction to this emerging field for researchers and practitioners interested in creating state-of-the-art pervasive display systems. We begin by describing the history of pervasive display research, providing illustrations of key systems, from pioneering work on supporting collaboration to contemporary systems designed for personalized information delivery. We then consider what the near future might hold for display networks, describing a series of compelling applications that are being postulated for future display networks. Creating such systems raises a wide range of challenges and requires designers to make a series of important trade-offs. We dedicate four chapters to key aspects of pervasive display design: audience engagement, display interaction, system software, and system evaluation. These chapters provide an overview of current thinking in each area. Finally, we present a series of case studies of display systems and our concluding remarks.},
booktitle = {Pervasive Displays - Understanding the Future of Digital Signage},
timestamp = {2014.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/davies2014synthesis.pdf},
}
N. Broy, S. Schneegass, F. Alt, and A. Schmidt. FrameBox and MirrorBox: Tools and Guidelines to Support Designers in Prototyping Interfaces for 3D Displays. In Proceedings of the 32nd Annual Association for Computing Machinery Conference on Human Factors in Computing Systems (CHI ’14), Association for Computing Machinery, New York, NY, USA, 2014, p. 2037–2046. doi:10.1145/2556288.2557183
[BibTeX] [Abstract] [PDF]
In this paper, we identify design guidelines for stereoscopic 3D (S3D) user interfaces (UIs) and present the MirrorBox and the FrameBox, two UI prototyping tools for S3D displays. As auto-stereoscopy becomes available for the mass market we believe the design of S3D UIs for devices, for example, mobile phones, public displays, or car dashboards, will rapidly gain importance. A benefit of such UIs is that they can group and structure information in a way that makes them easily perceivable for the user. For example, important information can be shown in front of less important information. This paper identifies core requirements for designing S3D UIs and derives concrete guidelines. The requirements also serve as a basis for two depth layout tools we built with the aim to overcome limitations of traditional prototyping when sketching S3D UIs. We evaluated the tools with usability experts and compared them to traditional paper prototyping.
@InProceedings{broy2014chi,
author = {Broy, Nora and Schneegass, Stefan and Alt, Florian and Schmidt, Albrecht},
booktitle = {{Proceedings of the 32nd Annual Association for Computing Machinery Conference on Human Factors in Computing Systems}},
title = {{FrameBox and MirrorBox: Tools and Guidelines to Support Designers in Prototyping Interfaces for 3D Displays}},
year = {2014},
address = {New York, NY, USA},
note = {broy2014chi},
pages = {2037--2046},
publisher = {Association for Computing Machinery},
series = {CHI '14},
abstract = {In this paper, we identify design guidelines for stereoscopic 3D (S3D) user interfaces (UIs) and present the MirrorBox and the FrameBox, two UI prototyping tools for S3D displays. As auto-stereoscopy becomes available for the mass market we believe the design of S3D UIs for devices, for example, mobile phones, public displays, or car dashboards, will rapidly gain importance. A benefit of such UIs is that they can group and structure information in a way that makes them easily perceivable for the user. For example, important information can be shown in front of less important information. This paper identifies core requirements for designing S3D UIs and derives concrete guidelines. The requirements also serve as a basis for two depth layout tools we built with the aim to overcome limitations of traditional prototyping when sketching S3D UIs. We evaluated the tools with usability experts and compared them to traditional paper prototyping.},
acmid = {2557183},
doi = {10.1145/2556288.2557183},
isbn = {978-1-4503-2473-1},
keywords = {prototyping, stereoscopic 3d, user interfaces},
location = {Toronto, Ontario, Canada},
numpages = {10},
timestamp = {2014.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2014chi.pdf},
}
M. Greis, F. Alt, N. Henze, and N. Memarovic. I Can Wait a Minute: Uncovering the Optimal Delay Time for Pre-moderated User-generated Content on Public Displays. In Proceedings of the SIGCHI Conference on Human Factors in Computing Systems (CHI ’14), Association for Computing Machinery, New York, NY, USA, 2014, p. 1435–1438. doi:10.1145/2556288.2557186
[BibTeX] [Abstract] [PDF]
Public displays have advanced from isolated and non-interactive “ad” displays which show images and videos to displays that are networked, interactive, and open to a wide variety of content and applications. Prior work has shown large potential of user-generated content on public displays. However, one of the problems with user-generated content on public displays is moderation, as content may be explicit or troublesome for a particular location. In this work we explore the expectations of users with regard to content moderation on public displays. An online survey revealed that people not only think that display content should be moderated but also that a delay of up to 10 minutes is acceptable if display content is moderated. In a subsequent in-the-wild deployment we compared different moderation delays. We found that a moderation delay significantly decreases the number of user-generated posts while at the same time there is no significant effect on users’ decision to repeatedly post on the display.
@InProceedings{greis2014chi,
author = {Greis, Miriam and Alt, Florian and Henze, Niels and Memarovic, Nemanja},
booktitle = {{Proceedings of the SIGCHI Conference on Human Factors in Computing Systems}},
title = {{I Can Wait a Minute: Uncovering the Optimal Delay Time for Pre-moderated User-generated Content on Public Displays}},
year = {2014},
address = {New York, NY, USA},
note = {greis2014chi},
pages = {1435--1438},
publisher = {Association for Computing Machinery},
series = {CHI '14},
abstract = {Public displays have advanced from isolated and non-interactive "ad" displays which show images and videos to displays that are networked, interactive, and open to a wide variety of content and applications. Prior work has shown large potential of user-generated content on public displays. However, one of the problems with user-generated content on public displays is moderation, as content may be explicit or troublesome for a particular location. In this work we explore the expectations of users with regard to content moderation on public displays. An online survey revealed that people not only think that display content should be moderated but also that a delay of up to 10 minutes is acceptable if display content is moderated. In a subsequent in-the-wild deployment we compared different moderation delays. We found that a moderation delay significantly decreases the number of user-generated posts while at the same time there is no significant effect on users' decision to repeatedly post on the display.},
acmid = {2557186},
doi = {10.1145/2556288.2557186},
isbn = {978-1-4503-2473-1},
keywords = {content moderation, public displays, twitter},
location = {Toronto, Ontario, Canada},
numpages = {4},
timestamp = {2014.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/greis2014chi.pdf},
}
J. R. Häkkilä, M. Posti, S. Schneegass, F. Alt, K. Gultekin, and A. Schmidt. Let Me Catch This!: Experiencing Interactive 3D Cinema Through Collecting Content with a Mobile Phone. In Proceedings of the 32nd Annual Association for Computing Machinery Conference on Human Factors in Computing Systems (CHI ’14), Association for Computing Machinery, New York, NY, USA, 2014, p. 1011–1020. doi:10.1145/2556288.2557187
[BibTeX] [Abstract] [PDF]
The entertainment industry is going through a transformation, and technology development is affecting how we can enjoy and interact with the entertainment media content in new ways. In our work, we explore how to enable interaction with content in the context of 3D cinemas. This allows viewers to use their mobile phone to retrieve, for example, information on the artist of the soundtrack currently playing or a discount coupon on the watch the main actor is wearing. We are particularly interested in the user experience of the interactive 3D cinema concept, and how different interactive elements and interaction techniques are perceived. We report on the development of a prototype application utilizing smart phones and on an evaluation in a cinema context with 20 participants. Results emphasize that designing for interactive cinema experiences should drive for holistic and positive user experiences. Interactive content should be tied together with the actual video content, but integrated into contexts where it does not conflict with the immersive experience with the movie.
@InProceedings{hakkila2014chi,
author = {H\"{a}kkil\"{a}, Jonna R. and Posti, Maaret and Schneegass, Stefan and Alt, Florian and Gultekin, Kunter and Schmidt, Albrecht},
booktitle = {{Proceedings of the 32nd Annual Association for Computing Machinery Conference on Human Factors in Computing Systems}},
title = {{Let Me Catch This!: Experiencing Interactive 3D Cinema Through Collecting Content with a Mobile Phone}},
year = {2014},
address = {New York, NY, USA},
note = {hakkila2014chi},
pages = {1011--1020},
publisher = {Association for Computing Machinery},
series = {CHI '14},
abstract = {The entertainment industry is going through a transformation, and technology development is affecting how we can enjoy and interact with entertainment media content in new ways. In our work, we explore how to enable interaction with content in the context of 3D cinemas. This allows viewers to use their mobile phone to retrieve, for example, information on the artist of the soundtrack currently playing or a discount coupon on the watch the main actor is wearing. We are particularly interested in the user experience of the interactive 3D cinema concept, and how different interactive elements and interaction techniques are perceived. We report on the development of a prototype application utilizing smart phones and on an evaluation in a cinema context with 20 participants. Results emphasize that designing for interactive cinema experiences should strive for holistic and positive user experiences. Interactive content should be tied together with the actual video content, but integrated into contexts where it does not conflict with the immersive experience of the movie.},
acmid = {2557187},
doi = {10.1145/2556288.2557187},
isbn = {978-1-4503-2473-1},
keywords = {3d, interactive cinema, mobile phone interaction, user experience, user studies},
location = {Toronto, Ontario, Canada},
numpages = {10},
timestamp = {2014.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/hakkila2014chi.pdf},
}
S. Schneegass, F. Alt, J. Scheible, A. Schmidt, and H. Su. Midair Displays: Exploring the Concept of Free-floating Public Displays. In CHI ’14 Extended Abstracts on Human Factors in Computing Systems (CHI EA ’14), Association for Computing Machinery, New York, NY, USA, 2014, p. 2035–2040. doi:10.1145/2559206.2581190
[BibTeX] [Abstract] [PDF]
Due to advances in technology, displays could replace literally any surface in the future, including walls, windows, and ceilings. At the same time, midair remains a relatively unexplored domain for the use of displays as of today, particularly in public spaces. Nevertheless, we see large potential in the ability to make displays appear at any possible point in space, both indoors and outdoors. Such displays, which we call midair displays, could control large crowds in emergency situations, they could be used during sports for navigation and feedback on performance, or they could be used as group displays which enable information to be brought to the user anytime and anywhere. We explore the concept of midair displays and show that with current technology, for example copter drones, such displays can be easily built.
@InProceedings{schneegass2014chiea,
author = {Schneegass, Stefan and Alt, Florian and Scheible, J\"{u}rgen and Schmidt, Albrecht and Su, Haifeng},
booktitle = {{CHI '14 Extended Abstracts on Human Factors in Computing Systems}},
title = {{Midair Displays: Exploring the Concept of Free-floating Public Displays}},
year = {2014},
address = {New York, NY, USA},
note = {schneegass2014chiea},
pages = {2035--2040},
publisher = {Association for Computing Machinery},
series = {CHI EA '14},
abstract = {Due to advances in technology, displays could replace literally any surface in the future, including walls, windows, and ceilings. At the same time, midair remains a relatively unexplored domain for the use of displays as of today, particularly in public spaces. Nevertheless, we see large potential in the ability to make displays appear at any possible point in space, both indoors and outdoors. Such displays, which we call midair displays, could control large crowds in emergency situations, they could be used during sports for navigation and feedback on performance, or they could be used as group displays which enable information to be brought to the user anytime and anywhere. We explore the concept of midair displays and show that with current technology, for example copter drones, such displays can be easily built.},
acmid = {2581190},
doi = {10.1145/2559206.2581190},
isbn = {978-1-4503-2474-8},
keywords = {drones, midair displays, public displays},
location = {Toronto, Ontario, Canada},
numpages = {6},
timestamp = {2014.04.27},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schneegass2014chiea.pdf},
}
N. Broy, B. J. Zierer, S. Schneegass, and F. Alt. Exploring virtual depth for automotive instrument cluster concepts. In Proceedings of the Extended Abstracts of the 32nd Annual ACM Conference on Human Factors in Computing Systems (CHI EA ’14), Association for Computing Machinery, New York, NY, USA, 2014, p. 1783–1788. doi:10.1145/2559206.2581362
[BibTeX] [Abstract] [PDF]
This paper compares the user experience of three novel concept designs for 3D-based car dashboards. Our work is motivated by the fact that analogue dashboards are currently being replaced by their digital counterparts. At the same time, auto-stereoscopic displays enter the market, allowing the quality of novel dashboards to be increased, both with regard to the perceived quality and in supporting the driving task. Since no guidelines or principles exist for the design of digital 3D dashboards, we take an initial step in designing and evaluating such interfaces. In a study with 12 participants we were able to show that stereoscopic 3D increases the perceived quality of the display while motion parallax leads to a rather disturbing experience.
@InProceedings{broy2014chiea,
author = {Broy, Nora and Zierer, Benedikt J. and Schneegass, Stefan and Alt, Florian},
booktitle = {{Proceedings of the Extended Abstracts of the 32nd Annual ACM Conference on Human Factors in Computing Systems}},
title = {Exploring Virtual Depth for Automotive Instrument Cluster Concepts},
year = {2014},
address = {New York, NY, USA},
note = {broy2014chiea},
pages = {1783--1788},
publisher = {Association for Computing Machinery},
series = {CHI EA '14},
abstract = {This paper compares the user experience of three novel concept designs for 3D-based car dashboards. Our work is motivated by the fact that analogue dashboards are currently being replaced by their digital counterparts. At the same time, auto-stereoscopic displays enter the market, allowing the quality of novel dashboards to be increased, both with regard to the perceived quality and in supporting the driving task. Since no guidelines or principles exist for the design of digital 3D dashboards, we take an initial step in designing and evaluating such interfaces. In a study with 12 participants we were able to show that stereoscopic 3D increases the perceived quality of the display while motion parallax leads to a rather disturbing experience.},
acmid = {2581362},
doi = {10.1145/2559206.2581362},
isbn = {978-1-4503-2474-8},
keywords = {automotive user interfaces, motion parallax, stereoscopic 3d, user experience},
location = {Toronto, Ontario, Canada},
numpages = {6},
timestamp = {2014.04.27},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2014chiea.pdf},
}
M. Pfeiffer, S. Schneegass, F. Alt, and M. Rohs. A Design Space for Electrical Muscle Stimulation Feedback for Freehand Interaction. In Proceedings of the First CHI Workshop on Assistive Augmentation (Assistive Augmentation ’14), 2014.
[BibTeX] [Abstract] [PDF]
Free-hand interaction becomes a common technique for interacting with large displays. At the same time, providing haptic feedback for free-hand interaction is still a challenge, particularly feedback with different characteristics (i.e., strengths, patterns) to convey particular information. We see electrical muscle stimulation (EMS) as a well-suited technology for providing haptic feedback in this domain. The characteristics of EMS can be used to assist users in learning, manipulating, and perceiving virtual objects. One of the core challenges is to understand these characteristics and how they can be applied. As a step in this direction, this paper presents a design space that identifies different aspects of using EMS for haptic feedback. The design space is meant as a basis for future research investigating how particular characteristics can be exploited to provide specific haptic feedback.
@InProceedings{pfeiffer2014asstech,
author = {Pfeiffer, Max AND Schneegass, Stefan AND Alt, Florian and Rohs, Michael},
title = {{A Design Space for Electrical Muscle Stimulation Feedback for Freehand Interaction}},
booktitle = {{Proceedings of the First CHI Workshop on Assistive Augmentation}},
year = {2014},
series = {Assistive Augmentation '14},
note = {pfeiffer2014asstech},
abstract = {Free-hand interaction becomes a common technique for interacting with large displays. At the same time, providing haptic feedback for free-hand interaction is still a challenge, particularly feedback with different characteristics (i.e., strengths, patterns) to convey particular information. We see electrical muscle stimulation (EMS) as a well-suited technology for providing haptic feedback in this domain. The characteristics of EMS can be used to assist users in learning, manipulating, and perceiving virtual objects. One of the core challenges is to understand these characteristics and how they can be applied. As a step in this direction, this paper presents a design space that identifies different aspects of using EMS for haptic feedback. The design space is meant as a basis for future research investigating how particular characteristics can be exploited to provide specific haptic feedback.},
location = {Toronto, Canada},
numpages = {6},
timestamp = {2014.04.26},
url = {http://www.florian-alt.org/unibw/wp-content/publications/pfeiffer2014asstech.pdf},
}
F. Alt, N. Memarovic, M. Greis, and N. Henze. UniDisplay – A Research Prototype to Investigate Expectations Towards Public Display Applications. In Proceedings of the 1st Workshop on Developing Applications for Pervasive Display Networks (PD-Apps ’14), IEEE, 2014.
[BibTeX] [Abstract] [PDF]
As public display networks become open, novel types of interactive applications emerge. In particular, we expect applications that support user-generated content to rapidly gain importance, since they provide a tangible benefit for the user in the form of digital bulletin boards, discussion platforms that foster public engagement, and applications that allow for self-expression. At the same time, such applications entail several challenges: first, they need to provide suitable means for the passerby to contribute content to the application; second, mechanisms need to be employed that provide sufficient control for the display owner with regard to content moderation; and third, the users' expectations with regard to the posting procedure need to be well understood. In this paper we present UniDisplay, a research prototype that enables users to post text and images to a public display. We report on the design and development of the application and provide early insights from the deployment in a university setting.
@InProceedings{alt2014pdapps,
author = {Alt, Florian and Memarovic, Nemanja AND Greis, Miriam and Henze, Niels},
title = {{UniDisplay - A Research Prototype to Investigate Expectations Towards Public Display Applications}},
booktitle = {{Proceedings of the 1st Workshop on Developing Applications for Pervasive Display Networks}},
year = {2014},
series = {PD-Apps '14},
publisher = {IEEE},
note = {alt2014pdapps},
abstract = {As public display networks become open, novel types of interactive applications emerge. In particular, we expect applications that support user-generated content to rapidly gain importance, since they provide a tangible benefit for the user in the form of digital bulletin boards, discussion platforms that foster public engagement, and applications that allow for self-expression. At the same time, such applications entail several challenges: first, they need to provide suitable means for the passerby to contribute content to the application; second, mechanisms need to be employed that provide sufficient control for the display owner with regard to content moderation; and third, the users' expectations with regard to the posting procedure need to be well understood. In this paper we present UniDisplay, a research prototype that enables users to post text and images to a public display. We report on the design and development of the application and provide early insights from the deployment in a university setting.},
keywords = {UniDisplay, Public Displays},
location = {Budapest, Hungary},
numpages = {6},
owner = {florianalt},
timestamp = {2014.03.28},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2014pdapps.pdf},
}
M. Pfeiffer, S. Schneegass, F. Alt, and M. Rohs. Let Me Grab This: A Comparison of EMS and Vibration for Haptic Feedback in Free-hand Interaction. In Proceedings of the 5th Augmented Human International Conference (AH ’14), Association for Computing Machinery, New York, NY, USA, 2014, p. 48:1–48:8. doi:10.1145/2582051.2582099
[BibTeX] [Abstract] [PDF]
Free-hand interaction with large displays is getting more common, for example in public settings and exertion games. Adding haptic feedback offers the potential for more realistic and immersive experiences. While vibrotactile feedback is well known, electrical muscle stimulation (EMS) has not yet been explored in free-hand interaction with large displays. EMS offers a wide range of different strengths and qualities of haptic feedback. In this paper we first systematically investigate the design space for haptic feedback. Second, we experimentally explore differences between strengths of EMS and vibrotactile feedback. Third, based on the results, we evaluate EMS and vibrotactile feedback with regard to different virtual objects (soft, hard) and interaction with different gestures (touch, grasp, punch) in front of a large display. The results provide a basis for the design of haptic feedback that is appropriate for the given type of interaction and the material.
@InProceedings{pfeiffer2014ah,
author = {Pfeiffer, Max and Schneegass, Stefan and Alt, Florian and Rohs, Michael},
booktitle = {{Proceedings of the 5th Augmented Human International Conference}},
title = {{Let Me Grab This: A Comparison of EMS and Vibration for Haptic Feedback in Free-hand Interaction}},
year = {2014},
address = {New York, NY, USA},
note = {pfeiffer2014ah},
pages = {48:1--48:8},
publisher = {Association for Computing Machinery},
series = {AH '14},
abstract = {Free-hand interaction with large displays is getting more common, for example in public settings and exertion games. Adding haptic feedback offers the potential for more realistic and immersive experiences. While vibrotactile feedback is well known, electrical muscle stimulation (EMS) has not yet been explored in free-hand interaction with large displays. EMS offers a wide range of different strengths and qualities of haptic feedback. In this paper we first systematically investigate the design space for haptic feedback. Second, we experimentally explore differences between strengths of EMS and vibrotactile feedback. Third, based on the results, we evaluate EMS and vibrotactile feedback with regard to different virtual objects (soft, hard) and interaction with different gestures (touch, grasp, punch) in front of a large display. The results provide a basis for the design of haptic feedback that is appropriate for the given type of interaction and the material.},
acmid = {2582099},
articleno = {48},
doi = {10.1145/2582051.2582099},
isbn = {978-1-4503-2761-9},
keywords = {electrical muscle stimulation, free-hand interaction, haptic feedback, large displays, tactile feedback},
location = {Kobe, Japan},
numpages = {8},
timestamp = {2014.03.07},
url = {http://www.florian-alt.org/unibw/wp-content/publications/pfeiffer2014ah.pdf},
}
F. Alt, S. Schneegass, J. Auda, R. Rzayev, and N. Broy. Using eye-tracking to support interaction with layered 3D interfaces on stereoscopic displays. In Proceedings of the 19th international conference on intelligent user interfaces (IUI ’14), Association for Computing Machinery, New York, NY, USA, 2014, p. 267–272. doi:10.1145/2557500.2557518
[BibTeX] [Abstract] [PDF]
In this paper, we investigate the concept of gaze-based interaction with 3D user interfaces. We currently see stereo vision displays becoming ubiquitous, particularly as auto-stereoscopy enables the perception of 3D content without the use of glasses. As a result, application areas for 3D beyond entertainment in cinema or at home emerge, including work settings, mobile phones, public displays, and cars. At the same time, eye tracking is hitting the consumer market with low-cost devices. We envision eye trackers in the future to be integrated with consumer devices (laptops, mobile phones, displays), hence allowing the user’s gaze to be analyzed and used as input for interactive applications. A particular challenge when applying this concept to 3D displays is that current eye trackers provide the gaze point in 2D only (x and y coordinates). In this paper, we compare the performance of two methods that use the eye’s physiology for calculating the gaze point in 3D space, hence enabling gaze-based interaction with stereoscopic content. Furthermore, we provide a comparison of gaze interaction in 2D and 3D with regard to user experience and performance. Our results show that with current technology, eye tracking on stereoscopic displays is possible with similar performance as on standard 2D screens.
@InProceedings{alt2014iui,
author = {Alt, Florian and Schneegass, Stefan and Auda, Jonas and Rzayev, Rufat and Broy, Nora},
booktitle = {Proceedings of the 19th International Conference on Intelligent User Interfaces},
title = {Using Eye-tracking to Support Interaction with Layered 3D Interfaces on Stereoscopic Displays},
year = {2014},
address = {New York, NY, USA},
note = {alt2014iui},
pages = {267--272},
publisher = {Association for Computing Machinery},
series = {IUI '14},
abstract = {In this paper, we investigate the concept of gaze-based interaction with 3D user interfaces. We currently see stereo vision displays becoming ubiquitous, particularly as auto-stereoscopy enables the perception of 3D content without the use of glasses. As a result, application areas for 3D beyond entertainment in cinema or at home emerge, including work settings, mobile phones, public displays, and cars. At the same time, eye tracking is hitting the consumer market with low-cost devices. We envision eye trackers in the future to be integrated with consumer devices (laptops, mobile phones, displays), hence allowing the user's gaze to be analyzed and used as input for interactive applications. A particular challenge when applying this concept to 3D displays is that current eye trackers provide the gaze point in 2D only (x and y coordinates). In this paper, we compare the performance of two methods that use the eye's physiology for calculating the gaze point in 3D space, hence enabling gaze-based interaction with stereoscopic content. Furthermore, we provide a comparison of gaze interaction in 2D and 3D with regard to user experience and performance. Our results show that with current technology, eye tracking on stereoscopic displays is possible with similar performance as on standard 2D screens.},
acmid = {2557518},
doi = {10.1145/2557500.2557518},
isbn = {978-1-4503-2184-6},
keywords = {3d, eye tracking, gaze interaction, stereoscopic displays},
location = {Haifa, Israel},
numpages = {6},
timestamp = {2014.02.24},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2014iui.pdf},
}

2013

F. Alt, A Design Space for Pervasive Advertising on Public Displays, Stuttgart, Germany: Ph.D. Thesis, 2013. doi:10.18419/opus-3021
[BibTeX] [Abstract] [PDF]
Today, people living in cities see up to 5000 ads per day and many of them are presented on public displays. More and more of these public displays are networked and equipped with various types of sensors, making them part of a global infrastructure that is currently emerging. Such networked and interactive public displays provide the opportunity to create a benefit for society in the form of immersive experiences and relevant content. In this way, they can overcome the display blindness that evolved among passersby over the years. We see two main reasons that prevent this vision from coming true: first, public displays are stuck with traditional advertising as the driving business model, making it difficult for novel, interactive applications to enter the scene. Second, no common ground exists for researchers or advertisers that outlines important challenges. The provider view and audience view need to be addressed to make open, interactive display networks successful. The main contribution made by this thesis is presenting a design space for advertising on public displays that identifies important challenges – mainly from a human-computer interaction perspective. Solutions to these core challenges are presented and evaluated, using empirical methods commonly applied in HCI. First, we look at challenges that arise from the shared use of display space. We conducted an observational study of traditional public notice areas that allowed us to identify different stakeholders, to understand their needs and motivations, to unveil current practices used to exercise control over the display, and to understand the interplay between space, stakeholders, and content. We present a set of design implications for open public display networks that we applied when implementing and evaluating a digital public notice area. Second, we tackle the challenge of making the user interact by taking a closer look at attracting attention, communicating interactivity, and enticing interaction. Attracting attention is crucial for any further action to happen. We present an approach that exploits gaze as a powerful input modality. By adapting content based on gaze, we are able to show a significant increase in attention and an effect on the user's attitude. In order to communicate interactivity, we show that the mirror representation of the user is a powerful interactivity cue. Finally, in order to entice interaction, we show that the user needs to be motivated to interact and to understand how interaction works. Findings from our experiments reveal direct touch and the mobile phone as suitable interaction technologies. In addition, these findings suggest that relevance of content, privacy, and security have a strong influence on user motivation.
@Book{alt2013diss,
author = {Alt, Florian},
publisher = {{Ph.D Thesis}},
title = {{A Design Space for Pervasive Advertising on Public Displays}},
year = {2013},
address = {Stuttgart, Germany},
note = {alt2013diss},
abstract = {Today, people living in cities see up to 5000 ads per day and many of them are presented on public displays. More and more of these public displays are networked and equipped with various types of sensors, making them part of a global infrastructure that is currently emerging. Such networked and interactive public displays provide the opportunity to create a benefit for society in the form of immersive experiences and relevant content. In this way, they can overcome the display blindness that evolved among passersby over the years. We see two main reasons that prevent this vision from coming true: first, public displays are stuck with traditional advertising as the driving business model, making it difficult for novel, interactive applications to enter the scene. Second, no common ground exists for researchers or advertisers that outlines important challenges. The provider view and audience view need to be addressed to make open, interactive display networks successful. The main contribution made by this thesis is presenting a design space for advertising on public displays that identifies important challenges – mainly from a human-computer interaction perspective. Solutions to these core challenges are presented and evaluated, using empirical methods commonly applied in HCI. First, we look at challenges that arise from the shared use of display space. We conducted an observational study of traditional public notice areas that allowed us to identify different stakeholders, to understand their needs and motivations, to unveil current practices used to exercise control over the display, and to understand the interplay between space, stakeholders, and content. We present a set of design implications for open public display networks that we applied when implementing and evaluating a digital public notice area. Second, we tackle the challenge of making the user interact by taking a closer look at attracting attention, communicating interactivity, and enticing interaction. Attracting attention is crucial for any further action to happen. We present an approach that exploits gaze as a powerful input modality. By adapting content based on gaze, we are able to show a significant increase in attention and an effect on the user's attitude. In order to communicate interactivity, we show that the mirror representation of the user is a powerful interactivity cue. Finally, in order to entice interaction, we show that the user needs to be motivated to interact and to understand how interaction works. Findings from our experiments reveal direct touch and the mobile phone as suitable interaction technologies. In addition, these findings suggest that relevance of content, privacy, and security have a strong influence on user motivation.},
doi = {10.18419/opus-3021},
timestamp = {2013.12.07},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2013diss.pdf},
}
F. Alt and B. Pfleging. Sonify – A Platform for the Sonification of Text Messages. In Proceedings of Mensch & Computer 2013, 2013. http://dl.gi.de/handle/20.500.12116/7508
[BibTeX] [Abstract] [PDF]
Sonification of text messages offers a great potential for personalization while at the same time allowing rich information to be mediated. For example, ringtones are the major form of personalization on smartphones besides apps and background images. Ringtones are often used as a form of self-expression by the smartphone owner (e.g., using one's favorite sound track as the standard ringtone), but also to identify the caller or sender of a message (e.g., the user knows who is calling without taking the phone out of the pocket). We believe this approach to be applicable to a wide variety of text messages, such as SMS, email, or IM. In this paper, we first present a web-based platform that allows user-generated mappings for text sonification to be created and managed. An API enables any application to send a text message and receive the sonification in the form of a MIDI file. To showcase the potential, we implemented an Android app that sonifies incoming SMS. Second, we evaluate the feasibility of our approach and show that sonified messages are equally effective as ringtones when conveying meta information.
@InProceedings{alt2013muc,
author = {Florian Alt AND Bastian Pfleging},
booktitle = {{Proceedings of Mensch \& Computer 2013}},
title = {{Sonify -- A Platform for the Sonification of Text Messages}},
year = {2013},
note = {alt2013muc},
abstract = {Sonification of text messages offers a great potential for personalization while at the same time allowing rich information to be mediated. For example, ringtones are the major form of personalization on smartphones besides apps and background images. Ringtones are often used as a form of self-expression by the smartphone owner (e.g., using one's favorite sound track as the standard ringtone), but also to identify the caller or sender of a message (e.g., the user knows who is calling without taking the phone out of the pocket). We believe this approach to be applicable to a wide variety of text messages, such as SMS, email, or IM. In this paper, we first present a web-based platform that allows user-generated mappings for text sonification to be created and managed. An API enables any application to send a text message and receive the sonification in the form of a MIDI file. To showcase the potential, we implemented an Android app that sonifies incoming SMS. Second, we evaluate the feasibility of our approach and show that sonified messages are equally effective as ringtones when conveying meta information.},
doi = {http://dl.gi.de/handle/20.500.12116/7508},
owner = {florianalt},
timestamp = {2013.10.04},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2013muc.pdf},
}
M. Pfeiffer, S. Schneegass, and F. Alt. Supporting Interaction in Public Space with Electrical Muscle Stimulation. In Adjunct Proceedings of the 2013 ACM International Joint Conference on Pervasive and Ubiquitous Computing (Ubicomp’13), Association for Computing Machinery Press, 2013.
[BibTeX] [Abstract] [PDF]
As displays in public space are augmented with sensors, such as the Kinect, they enable passersby to interact with the content on the screen. As of today, feedback on the user action in such environments is usually limited to the visual channel. However, we believe that more immediate and intense forms, in particular haptic feedback, do not only increase the user experience, but may also have a strong impact on user attention and memorization of the content encountered during the interaction. Haptic feedback can today be achieved through vibration on the mobile phone, which is strongly dependent on the location of the device. We envision that fabrics, such as underwear, can in the future be equipped with electrical muscle stimulation, thus providing a more natural and direct way of haptic feedback. In this demo we aim to showcase the potential of applying electrical muscle stimulation as direct haptic feedback during interaction in public spaces in the context of a Kinect-based game for public displays.
@InProceedings{pfeiffer2013ubicompadj,
author = {Max Pfeiffer AND Stefan Schneegass AND Florian Alt},
booktitle = {{Adjunct Proceedings of the 2013 ACM International Joint Conference on Pervasive and Ubiquitous Computing}},
title = {{Supporting Interaction in Public Space with Electrical Muscle Stimulation}},
year = {2013},
note = {pfeiffer2013ubicompadj},
publisher = {Association for Computing Machinery Press},
series = {Ubicomp'13},
abstract = {As displays in public space are augmented with sensors, such as the Kinect, they enable passersby to interact with the content on the screen. As of today, feedback on the user action in such environments is usually limited to the visual channel. However, we believe that more immediate and intense forms, in particular haptic feedback, do not only increase the user experience, but may also have a strong impact on user attention and memorization of the content encountered during the interaction. Haptic feedback can today be achieved through vibration on the mobile phone, which is strongly dependent on the location of the device. We envision that fabrics, such as underwear, can in the future be equipped with electrical muscle stimulation, thus providing a more natural and direct way of haptic feedback. In this demo we aim to showcase the potential of applying electrical muscle stimulation as direct haptic feedback during interaction in public spaces in the context of a Kinect-based game for public displays.},
owner = {florianalt},
timestamp = {2013.09.20},
url = {http://www.florian-alt.org/unibw/wp-content/publications/pfeiffer2013ubicompadj.pdf},
}
N. Memarovic, M. Langheinrich, K. Cheverst, N. Taylor, and F. Alt. P-LAYERS – A Layered Framework Addressing the Multifaceted Issues Facing Community-Supporting Public Display Deployments. ACM Trans. Comput.-Hum. Interact., vol. 20, iss. 3, p. 17:1–17:34, 2013. doi:10.1145/2491500.2491505
[BibTeX] [Abstract] [PDF]
The proliferation of digital signage systems has prompted a wealth of research that attempts to use public displays for more than just advertisement or transport schedules, such as their use for supporting communities. However, deploying and maintaining display systems “in the wild” that can support communities is challenging. Based on the authors’ experiences in designing and fielding a diverse range of community-supporting public display deployments, we identify a large set of challenges and issues that researchers working in this area are likely to encounter. Grouping them into five distinct layers – (1) hardware, (2) system architecture, (3) content, (4) system interaction, and (5) community interaction design – we draw up the P-LAYERS framework to enable a more systematic appreciation of the diverse range of issues associated with the development, the deployment, and the maintenance of such systems. Using three of our own deployments as illustrative examples, we will describe both our experiences within each individual layer, as well as point out interactions between the layers. We believe our framework provides a valuable aid for researchers looking to work in this space, alerting them to the issues they are likely to encounter during their deployments, and help them plan accordingly.
@Article{memarovic2013tochi,
author = {Memarovic, Nemanja and Langheinrich, Marc and Cheverst, Keith and Taylor, Nick and Alt, Florian},
journal = {{ACM Trans. Comput.-Hum. Interact.}},
title = {{P-LAYERS -- A Layered Framework Addressing the Multifaceted Issues Facing Community-Supporting Public Display Deployments}},
year = {2013},
issn = {1073-0516},
month = jul,
note = {memarovic2013tochi},
number = {3},
pages = {17:1--17:34},
volume = {20},
abstract = {The proliferation of digital signage systems has prompted a wealth of research that attempts to use public displays for more than just advertisement or transport schedules, such as their use for supporting communities. However, deploying and maintaining display systems “in the wild” that can support communities is challenging. Based on the authors’ experiences in designing and fielding a diverse range of community-supporting public display deployments, we identify a large set of challenges and issues that researchers working in this area are likely to encounter. Grouping them into five distinct layers -- (1) hardware, (2) system architecture, (3) content, (4) system interaction, and (5) community interaction design -- we draw up the P-LAYERS framework to enable a more systematic appreciation of the diverse range of issues associated with the development, the deployment, and the maintenance of such systems. Using three of our own deployments as illustrative examples, we will describe both our experiences within each individual layer, as well as point out interactions between the layers. We believe our framework provides a valuable aid for researchers looking to work in this space, alerting them to the issues they are likely to encounter during their deployments, and help them plan accordingly.},
acmid = {2491505},
address = {New York, NY, USA},
articleno = {17},
doi = {10.1145/2491500.2491505},
issue_date = {July 2013},
keywords = {Community interaction, community needs, pervasive displays, public displays},
numpages = {34},
publisher = {Association for Computing Machinery},
timestamp = {2013.06.17},
url = {http://www.florian-alt.org/unibw/wp-content/publications/memarovic2013tochi.pdf},
}
F. Alt, A. S. Shirazi, T. Kubitza, and A. Schmidt. Interaction techniques for creating and exchanging content with public displays. In Proceedings of the SIGCHI Conference on Human Factors in Computing Systems (CHI ’13), Association for Computing Machinery, New York, NY, USA, 2013, p. 1709–1718. doi:10.1145/2470654.2466226
[BibTeX] [Abstract] [PDF]
Falling hardware prices and ever more displays being connected to the Internet will lead to large public display networks, potentially forming a novel communication medium. We envision that such networks are not restricted to display owners and advertisers anymore, but allow also passersby (e.g., customers) to exchange content, similar to traditional public notice areas, such as bulletin boards. In this context it is crucial to understand emerging practices and provide easy and straightforward interaction techniques to be used for creating and exchanging content. In this paper, we present Digifieds, a digital public notice area we built to investigate and compare possible interaction techniques. Based on a lab study we show that using direct touch at the display as well as using the mobile phone as a complementing interaction technology are most suitable. Direct touch at the display closely resembles the interaction known from classic bulletin boards and provides the highest usability. Mobile phones preserve the users’ privacy as they exchange (sensitive) data with the display and at the same time allow content to be created on-the-go or to be retrieved.
@InProceedings{alt2013chi,
author = {Alt, Florian and Shirazi, Alireza Sahami and Kubitza, Thomas and Schmidt, Albrecht},
booktitle = {{Proceedings of the SIGCHI Conference on Human Factors in Computing Systems}},
title = {{Interaction techniques for creating and exchanging content with public displays}},
year = {2013},
address = {New York, NY, USA},
note = {alt2013chi},
pages = {1709--1718},
publisher = {Association for Computing Machinery},
series = {CHI '13},
abstract = {Falling hardware prices and ever more displays being connected to the Internet will lead to large public display networks, potentially forming a novel communication medium. We envision that such networks are not restricted to display owners and advertisers anymore, but allow also passersby (e.g., customers) to exchange content, similar to traditional public notice areas, such as bulletin boards. In this context it is crucial to understand emerging practices and provide easy and straightforward interaction techniques to be used for creating and exchanging content. In this paper, we present Digifieds, a digital public notice area we built to investigate and compare possible interaction techniques. Based on a lab study we show that using direct touch at the display as well as using the mobile phone as a complementing interaction technology are most suitable. Direct touch at the display closely resembles the interaction known from classic bulletin boards and provides the highest usability. Mobile phones preserve the users' privacy as they exchange (sensitive) data with the display and at the same time allow content to be created on-the-go or to be retrieved.},
acmid = {2466226},
doi = {10.1145/2470654.2466226},
isbn = {978-1-4503-1899-0},
keywords = {classified ads, digifieds, interaction, public displays},
location = {Paris, France},
numpages = {10},
owner = {florianalt},
timestamp = {2013.06.12},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2013chi.pdf},
}
N. Broy, F. Alt, S. Schneegass, N. Henze, and A. Schmidt. Perceiving layered information on 3D displays using binocular disparity. In Proceedings of the 2013 ACM International Symposium on Pervasive Displays (PerDis ’13), Association for Computing Machinery, New York, NY, USA, 2013, p. 61–66. doi:10.1145/2491568.2491582
[BibTeX] [Abstract] [PDF]
3D displays are hitting the mass market. They are integrated in consumer TVs, notebooks, and mobile phones and are mainly used for virtual reality as well as video content. We see large potential in using depth also for structuring information. Our specific use case is 3D displays integrated in cars. The capabilities of such displays could be used to present relevant information to the driver in a fast and easy-to-understand way, e.g., by functionality-based clustering. However, excessive parallaxes can cause discomfort and in turn negatively influence the primary driving task. This requires a reasonable choice of parallax boundaries. The contribution of this paper is twofold. First, we identify the comfort zone when perceiving 3D content. Second, we determine a minimum depth distance between objects that still enables users to quickly and accurately separate the two depth planes. The results yield that in terms of task completion time the optimum distance from screen level is up to 35.9 arc-min angular disparity behind the screen plane. A distance of at least 2.7 arc-min difference in angular disparity between the objects significantly decreases time for layer separation. Based on the results we derive design implications.
@InProceedings{broy2013perdis,
author = {Broy, Nora and Alt, Florian and Schneegass, Stefan and Henze, Niels and Schmidt, Albrecht},
booktitle = {{Proceedings of the 2013 ACM International Symposium on Pervasive Displays}},
title = {{Perceiving layered information on 3D displays using binocular disparity}},
year = {2013},
address = {New York, NY, USA},
note = {broy2013perdis},
pages = {61--66},
publisher = {Association for Computing Machinery},
series = {PerDis '13},
abstract = {3D displays are hitting the mass market. They are integrated in consumer TVs, notebooks, and mobile phones and are mainly used for virtual reality as well as video content. We see large potential in using depth also for structuring information. Our specific use case is 3D displays integrated in cars. The capabilities of such displays could be used to present relevant information to the driver in a fast and easy-to-understand way, e.g., by functionality-based clustering. However, excessive parallaxes can cause discomfort and in turn negatively influence the primary driving task. This requires a reasonable choice of parallax boundaries. The contribution of this paper is twofold. First, we identify the comfort zone when perceiving 3D content. Second, we determine a minimum depth distance between objects that still enables users to quickly and accurately separate the two depth planes. The results yield that in terms of task completion time the optimum distance from screen level is up to 35.9 arc-min angular disparity behind the screen plane. A distance of at least 2.7 arc-min difference in angular disparity between the objects significantly decreases time for layer separation. Based on the results we derive design implications.},
acmid = {2491582},
doi = {10.1145/2491568.2491582},
isbn = {978-1-4503-2096-2},
keywords = {3D displays, automotive user interfaces, human factors},
location = {Mountain View, California},
numpages = {6},
owner = {florianalt},
timestamp = {2013.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2013perdis.pdf},
}
N. Memarovic, K. Cheverst, M. Langheinrich, I. Elhart, and F. Alt. Tethered or free to roam: the design space of limiting content access on community displays. In Proceedings of the 2013 ACM International Symposium on Pervasive Displays (PerDis ’13), Association for Computing Machinery, New York, NY, USA, 2013, p. 127–132. doi:10.1145/2491568.2491596
[BibTeX] [Abstract] [PDF]
Many design decisions need to be made when creating situated public displays that aim to serve a community. One such decision concerns access to its contents: should users be able to access content remotely, e.g., via a web page, or should this be limited to users who are co-located with the display? A similar decision has to be made for community content upload: do posters need to be co-located with the display or can posts be made from any location? In other words, content display and creation can be ‘tethered’ to a display or it can be ‘free to roam’, i.e., accessible from anywhere. In this paper we analyze prior community display deployments in an attempt to explore this space and produce a taxonomy that highlights the inherent design choices. Furthermore, we discuss some of the reasons that may underlie these choices and identify opportunities for design.
@InProceedings{memarovic2013perdis,
author = {Memarovic, Nemanja and Cheverst, Keith and Langheinrich, Marc and Elhart, Ivan and Alt, Florian},
booktitle = {{Proceedings of the 2013 ACM International Symposium on Pervasive Displays}},
title = {{Tethered or free to roam: the design space of limiting content access on community displays}},
year = {2013},
address = {New York, NY, USA},
note = {memarovic2013perdis},
pages = {127--132},
publisher = {Association for Computing Machinery},
series = {PerDis '13},
abstract = {Many design decisions need to be made when creating situated public displays that aim to serve a community. One such decision concerns access to its contents: should users be able to access content remotely, e.g., via a web page, or should this be limited to users who are co-located with the display? A similar decision has to be made for community content upload: do posters need to be co-located with the display or can posts be made from any location? In other words, content display and creation can be 'tethered' to a display or it can be 'free to roam', i.e., accessible from anywhere. In this paper we analyze prior community display deployments in an attempt to explore this space and produce a taxonomy that highlights the inherent design choices. Furthermore, we discuss some of the reasons that may underlie these choices and identify opportunities for design.},
acmid = {2491596},
doi = {10.1145/2491568.2491596},
isbn = {978-1-4503-2096-2},
keywords = {collocation, communities, content, public displays},
location = {Mountain View, California},
numpages = {6},
owner = {florianalt},
timestamp = {2013.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/memarovic2013perdis.pdf},
}
F. Alt, S. Schneegass, M. Girgis, and A. Schmidt. Cognitive effects of interactive public display applications. In Proceedings of the 2013 ACM International Symposium on Pervasive Displays (PerDis ’13), Association for Computing Machinery, New York, NY, USA, 2013, p. 13–18. doi:10.1145/2491568.2491572
[BibTeX] [Abstract] [PDF]
Many public displays are nowadays equipped with different types of sensors. Such displays allow engaging and persistent user experiences to be created, e.g., in the form of gesture-controlled games or content exploration using direct touch at the display. However, as digital displays replace traditional posters and billboards, display owners are reluctant to deploy interactive content, but rather adapt traditional, non-interactive content. The main reason is that the benefit of such interactive deployments is not obvious. Our hypothesis is that interactivity has a cognitive effect on users and therefore increases the ability to remember what they have seen on the screen – which is beneficial both for the display owner and the user. In this paper we systematically investigate the impact of interactive content on public displays on the users’ cognition in different situations. Our findings indicate that overall memorability is positively affected as users interact. Based on these findings we discuss design implications for interactive public displays.
@InProceedings{alt2013perdis,
author = {Alt, Florian and Schneegass, Stefan and Girgis, Michael and Schmidt, Albrecht},
booktitle = {{Proceedings of the 2013 ACM International Symposium on Pervasive Displays}},
title = {{Cognitive effects of interactive public display applications}},
year = {2013},
address = {New York, NY, USA},
note = {alt2013perdis},
pages = {13--18},
publisher = {Association for Computing Machinery},
series = {PerDis '13},
abstract = {Many public displays are nowadays equipped with different types of sensors. Such displays allow engaging and persistent user experiences to be created, e.g., in the form of gesture-controlled games or content exploration using direct touch at the display. However, as digital displays replace traditional posters and billboards, display owners are reluctant to deploy interactive content, but rather adapt traditional, non-interactive content. The main reason is that the benefit of such interactive deployments is not obvious. Our hypothesis is that interactivity has a cognitive effect on users and therefore increases the ability to remember what they have seen on the screen -- which is beneficial both for the display owner and the user. In this paper we systematically investigate the impact of interactive content on public displays on the users' cognition in different situations. Our findings indicate that overall memorability is positively affected as users interact. Based on these findings we discuss design implications for interactive public displays.},
acmid = {2491572},
doi = {10.1145/2491568.2491572},
isbn = {978-1-4503-2096-2},
keywords = {digital signage, interactivity, public display, recall, recognition},
location = {Mountain View, California},
numpages = {6},
owner = {florianalt},
timestamp = {2013.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2013perdis.pdf},
}
R. José, J. Cardoso, F. Alt, S. Clinch, and N. Davies. Mobile applications for open display networks: common design considerations. In Proceedings of the 2013 ACM International Symposium on Pervasive Displays (PerDis ’13), Association for Computing Machinery, New York, NY, USA, 2013, p. 97–102. doi:10.1145/2491568.2491590
[BibTeX] [Abstract] [PDF]
Mobile devices can be a powerful tool for interaction with public displays, but mobile applications supporting this form of interaction are not yet part of our everyday reality. There are no widely accepted abstractions, standards, or practices that may enable systematic interaction between mobile devices and public displays. We envision public displays to move away from a world of closed display networks to scenarios where mobile applications could allow people to interact with the myriad of displays they might encounter during their everyday trips. In this research, we study the key processes involved in this collaborative interaction between public shared displays and mobile applications. Based on the lessons learned from our own development and deployment of 3 applications, and also on the analysis of the interactive features described in the literature, we have identified 8 key processes that may shape this form of interaction: Discovery, Association, Presence Management, Exploration, Interface Migration, Controller, Media Upload and Media Download. The contribution of this work is the identification of these high-level processes and an elicitation of the main design considerations for display networks.
@InProceedings{jose2013perdis,
author = {Jos{\'e}, Rui and Cardoso, Jorge and Alt, Florian and Clinch, Sarah and Davies, Nigel},
booktitle = {{Proceedings of the 2013 ACM International Symposium on Pervasive Displays}},
title = {{Mobile applications for open display networks: common design considerations}},
year = {2013},
address = {New York, NY, USA},
note = {jose2013perdis},
pages = {97--102},
publisher = {Association for Computing Machinery},
series = {PerDis '13},
abstract = {Mobile devices can be a powerful tool for interaction with public displays, but mobile applications supporting this form of interaction are not yet part of our everyday reality. There are no widely accepted abstractions, standards, or practices that may enable systematic interaction between mobile devices and public displays. We envision public displays to move away from a world of closed display networks to scenarios where mobile applications could allow people to interact with the myriad of displays they might encounter during their everyday trips. In this research, we study the key processes involved in this collaborative interaction between public shared displays and mobile applications. Based on the lessons learned from our own development and deployment of 3 applications, and also on the analysis of the interactive features described in the literature, we have identified 8 key processes that may shape this form of interaction: Discovery, Association, Presence Management, Exploration, Interface Migration, Controller, Media Upload and Media Download. The contribution of this work is the identification of these high-level processes and an elicitation of the main design considerations for display networks.},
acmid = {2491590},
doi = {10.1145/2491568.2491590},
isbn = {978-1-4503-2096-2},
keywords = {mobile applications, open display networks, public displays},
location = {Mountain View, California},
numpages = {6},
owner = {florianalt},
timestamp = {2013.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/jose2013perdis.pdf},
}
F. Alt and S. Schneegass. Towards Understanding the Cognitive Effects of Interactivity. In Proceedings of the 1st Workshop on Experiencing Interactivity in Public Space (EIPS’13), 2013.
[BibTeX] [Abstract] [PDF]
Cheap and easy-to-deploy consumer hardware, such as the Microsoft Kinect, touch screens, and smartphones drive an increasing proliferation of public space with interactive applications. Such applications include artistic, playful, and informative content on public displays. Though such applications are in general positively perceived by users, their benefit is in many cases not clear. In this paper we argue that while most current (advertising) content on public displays aims at stimulating user action (e.g., making a purchase), interactive applications are also suitable to support cognition. In our work, we focus on awareness as one particular form of cognition and assess it by measuring recall and recognition. This is not only interesting for advertising but for any type of application that requires the user to remember information. We contribute a design space and map out directions for future research.
@InProceedings{alt2013eips,
author = {Florian Alt AND Stefan Schneegass},
booktitle = {{Proceedings of the 1st Workshop on Experiencing Interactivity in Public Space}},
title = {{Towards Understanding the Cognitive Effects of Interactivity}},
year = {2013},
note = {alt2013eips},
series = {EIPS'13},
abstract = {Cheap and easy-to-deploy consumer hardware, such as the Microsoft Kinect, touch screens, and smartphones drive an increasing proliferation of public space with interactive applications. Such applications include artistic, playful, and informative content on public displays. Though such applications are in general positively perceived by users, their benefit is in many cases not clear. In this paper we argue that while most current (advertising) content on public displays aims at stimulating user action (e.g., making a purchase), interactive applications are also suitable to support cognition. In our work, we focus on awareness as one particular form of cognition and assess it by measuring recall and recognition. This is not only interesting for advertising but for any type of application that requires the user to remember information. We contribute a design space and map out directions for future research.},
owner = {florianalt},
timestamp = {2013.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2013eips.pdf},
}

2012

N. Memarovic, M. Langheinrich, F. Alt, I. Elhart, S. Hosio, and E. Rubegni. Using Public Displays to Stimulate Passive Engagement, Active Engagement, and Discovery in Public Spaces. In Proceedings of the 4th Media Architecture Biennale Conference: Participation (MAB ’12), Association for Computing Machinery, New York, NY, USA, 2012, p. 55–64. doi:10.1145/2421076.2421086
[BibTeX] [Abstract] [PDF]
In their influential book “Public space” Carr et al. describe essential human needs that public spaces fulfill: (1) passive engagement with the environment, where we observe what others are doing; (2) active engagement through intellectual challenges posed by the space, or through engagement with the people in it; and (3) excitement of novel discoveries within the space. An often underused resource in public spaces – public displays – can be used to stimulate these needs. In this paper we argue for a new research direction that explores how public displays can stimulate such essential needs in public spaces. We describe and conceptualize related processes that occur around public displays, based on in-depth observations of people interacting with a publicly fielded display application in a city center. Our conceptualization is meant to lay the foundations for designing engaging public display systems that stimulate PACD, and for supporting the analysis of existing deployments.
@InProceedings{memarovic2012mab,
author = {Memarovic, Nemanja and Langheinrich, Marc and Alt, Florian and Elhart, Ivan and Hosio, Simo and Rubegni, Elisa},
booktitle = {{Proceedings of the 4th Media Architecture Biennale Conference: Participation}},
title = {{Using Public Displays to Stimulate Passive Engagement, Active Engagement, and Discovery in Public Spaces}},
year = {2012},
address = {New York, NY, USA},
note = {memarovic2012mab},
pages = {55--64},
publisher = {Association for Computing Machinery},
series = {MAB '12},
abstract = {In their influential book "Public space" Carr et al. describe essential human needs that public spaces fulfill: (1) passive engagement with the environment, where we observe what others are doing; (2) active engagement through intellectual challenges posed by the space, or through engagement with the people in it; and (3) excitement of novel discoveries within the space. An often underused resource in public spaces -- public displays -- can be used to stimulate these needs. In this paper we argue for a new research direction that explores how public displays can stimulate such essential needs in public spaces. We describe and conceptualize related processes that occur around public displays, based on in-depth observations of people interacting with a publicly fielded display application in a city center. Our conceptualization is meant to lay the foundations for designing engaging public display systems that stimulate PACD, and for supporting the analysis of existing deployments.},
acmid = {2421086},
doi = {10.1145/2421076.2421086},
isbn = {978-1-4503-1792-4},
keywords = {community interaction, identity cognition, public displays, public space, urban computing, urban informatics},
location = {Aarhus, Denmark},
numpages = {10},
timestamp = {2012.11.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/memarovic2012mab.pdf},
}
F. Alt, A. S. Shirazi, A. Schmidt, and J. Mennenöh. Increasing the User’s Attention on the Web: Using Implicit Interaction Based on Gaze Behavior to Tailor Content. In Proceedings of the 7th Nordic Conference on Human-Computer Interaction: Making Sense Through Design (NordiCHI ’12), Association for Computing Machinery, New York, NY, USA, 2012, p. 544–553. doi:10.1145/2399016.2399099
[BibTeX] [Abstract] [PDF]
The World Wide Web has evolved into a widely used interactive application platform, providing information, products, and services. With eye trackers we envision that gaze information as an additional input channel can be used in the future to adapt and tailor web content (e.g., news, information, ads) towards the users’ attention as they implicitly interact with web pages. We present a novel approach, which allows web content to be customized on-the-fly based on the user’s gaze behavior (dwell time, duration of fixations, and number of fixations). Our system analyzes the gaze path on a page and uses this information to create adaptive content on subsequent pages. As a proof-of-concept we report on a case study with 12 participants. We presented them both randomly chosen content (baseline) as well as content chosen based on their gaze behavior. We found a significant increase of attention towards the adapted content and evidence for changes in the user attitude based on the Elaboration Likelihood Model.
@InProceedings{alt2012nordichi,
author = {Alt, Florian and Shirazi, Alireza Sahami and Schmidt, Albrecht and Mennen\"{o}h, Julian},
booktitle = {{Proceedings of the 7th Nordic Conference on Human-Computer Interaction: Making Sense Through Design}},
title = {{Increasing the User's Attention on the Web: Using Implicit Interaction Based on Gaze Behavior to Tailor Content}},
year = {2012},
address = {New York, NY, USA},
note = {alt2012nordichi},
pages = {544--553},
publisher = {Association for Computing Machinery},
series = {NordiCHI '12},
abstract = {The World Wide Web has evolved into a widely used interactive application platform, providing information, products, and services. With eye trackers we envision that gaze information as an additional input channel can be used in the future to adapt and tailor web content (e.g., news, information, ads) towards the users' attention as they implicitly interact with web pages. We present a novel approach, which allows web content to be customized on-the-fly based on the user's gaze behavior (dwell time, duration of fixations, and number of fixations). Our system analyzes the gaze path on a page and uses this information to create adaptive content on subsequent pages. As a proof-of-concept we report on a case study with 12 participants. We presented them both randomly chosen content (baseline) as well as content chosen based on their gaze behavior. We found a significant increase of attention towards the adapted content and evidence for changes in the user attitude based on the Elaboration Likelihood Model.},
acmid = {2399099},
doi = {10.1145/2399016.2399099},
isbn = {978-1-4503-1482-4},
keywords = {adaptative content, eye tracking, implicit interaction},
location = {Copenhagen, Denmark},
numpages = {10},
timestamp = {2012.10.19},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012nordichi.pdf},
}
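The adaptation step this paper describes can be prototyped compactly. Below is a minimal Python sketch of the idea only, not the authors' system: gaze features per content region (dwell time, number of fixations, and fixation duration, as named in the abstract) are combined into an attention score that decides which topic the next page favors. The weights, topic names, and catalog are invented for illustration.

from dataclasses import dataclass

@dataclass
class GazeStats:
    """Per-region gaze features, as named in the abstract."""
    dwell_time: float     # total seconds the gaze rested on the region
    fixations: int        # number of fixations on the region
    mean_fixation: float  # average fixation duration in seconds

def attention_score(g: GazeStats) -> float:
    # Illustrative linear weighting; the paper does not publish one.
    return 0.5 * g.dwell_time + 0.3 * g.fixations + 0.2 * g.mean_fixation

def pick_next_content(gaze: dict[str, GazeStats],
                      catalog: dict[str, list[str]]) -> list[str]:
    """Return the items of the topic that attracted the most attention."""
    top = max(gaze, key=lambda topic: attention_score(gaze[topic]))
    return catalog.get(top, [])

# A user lingered on sports headlines, so the next page favors sports.
gaze = {"sports":  GazeStats(6.2, 9, 0.41),
        "finance": GazeStats(1.1, 2, 0.22)}
catalog = {"sports": ["Match report", "Transfer news"],
           "finance": ["Market brief"]}
print(pick_next_content(gaze, catalog))  # ['Match report', 'Transfer news']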
F. Alt, A. Sahami Shirazi, A. Schmidt, and R. Atterer. Bridging Waiting Times on Web Pages. In Proceedings of the 14th International Conference on Human-computer Interaction with Mobile Devices and Services (MobileHCI ’12), Association for Computing Machinery, New York, NY, USA, 2012, p. 305–308. doi:10.1145/2371574.2371619
[BibTeX] [Abstract] [PDF]
High-speed Internet connectivity makes browsing a convenient task. However, there are many situations in which surfing the web is still slow due to limited bandwidth, slow servers, or complex queries. As a result, loading web pages can take several seconds, making (mobile) browsing cumbersome. We present an approach which makes use of the time spent on waiting for the next page, by bridging the wait with extra cached or preloaded content. We show how the content (e.g., news, Twitter) can be adapted to the user’s interests and to the context of use, hence making mobile surfing more comfortable. We compare two approaches: in time-multiplex mode, the entire screen displays bridging content until the loading is finished. In space-multiplex mode, content is displayed alongside the requested content while it loads. We use an HTTP proxy to intercept requests and add JavaScript code, which allows the bridging content from websites of our choice to be inserted. The approach was evaluated with 15 participants, assessing suitable content and usability.
@InProceedings{alt2012mobilehci,
author = {Alt, Florian and Sahami Shirazi, Alireza and Schmidt, Albrecht and Atterer, Richard},
booktitle = {{Proceedings of the 14th International Conference on Human-computer Interaction with Mobile Devices and Services}},
title = {{Bridging Waiting Times on Web Pages}},
year = {2012},
address = {New York, NY, USA},
note = {alt2012mobilehci},
pages = {305--308},
publisher = {Association for Computing Machinery},
series = {MobileHCI '12},
abstract = {High-speed Internet connectivity makes browsing a convenient task. However, there are many situations in which surfing the web is still slow due to limited bandwidth, slow servers, or complex queries. As a result, loading web pages can take several seconds, making (mobile) browsing cumbersome. We present an approach which makes use of the time spent on waiting for the next page, by bridging the wait with extra cached or preloaded content. We show how the content (e.g., news, Twitter) can be adapted to the user's interests and to the context of use, hence making mobile surfing more comfortable. We compare two approaches: in time-multiplex mode, the entire screen displays bridging content until the loading is finished. In space-multiplex mode, content is displayed alongside the requested content while it loads. We use an HTTP proxy to intercept requests and add JavaScript code, which allows the bridging content from websites of our choice to be inserted. The approach was evaluated with 15 participants, assessing suitable content and usability.},
acmid = {2371619},
doi = {10.1145/2371574.2371619},
isbn = {978-1-4503-1105-2},
keywords = {mobile device, waiting time, www},
location = {San Francisco, California, USA},
numpages = {4},
timestamp = {2012.10.18},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012mobilehci.pdf},
}
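The mechanism in this paper, a proxy that rewrites pages so bridging content appears while the requested page finishes loading, hinges on injecting markup into intercepted HTML. The Python sketch below shows only that rewriting step and assumes some intercepting proxy hands over the response body; the overlay markup and its removal-on-load behavior are illustrative assumptions, not the authors' code.

# Hypothetical bridging overlay: rendered as soon as the snippet is parsed,
# removed once the page (including slow resources) has finished loading.
BRIDGING_SNIPPET = """
<div id="bridging">While you wait: latest headlines ...</div>
<script>
  window.addEventListener('load', function () {
    var el = document.getElementById('bridging');
    if (el) el.remove();
  });
</script>
"""

def inject_bridging(html: str) -> str:
    """Insert the bridging snippet just before </body>, or append if absent.
    A real proxy would apply this to every intercepted HTML response."""
    marker = "</body>"
    if marker in html:
        return html.replace(marker, BRIDGING_SNIPPET + marker, 1)
    return html + BRIDGING_SNIPPET

print(inject_bridging("<html><body><p>Requested page</p></body></html>"))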
B. Pfleging, F. Alt, and A. Schmidt. Meaningful Melodies – Personal Sonification of Text Messages for Mobile Devices. In Adjunct Proceedings of the 14th Association for Computing Machinery SIGCHI’s International Conference on Human-Computer Interaction with Mobile Devices and Services (MobileHCI’12), San Francisco, CA, US, 2012. doi:10.1145/2371664.2371706
[BibTeX] [Abstract] [PDF]
Mobile phones offer great potential for personalization. Besides apps and background images, ringtones are the major form of personalization. They are most often used to have a personal sound for incoming texts and calls. Furthermore, ringtones are used to identify the caller or sender of a message. In parts, this function is utilitarian (e.g., caller identification without looking at the phone) but it is also a form of self-expression (e.g., favorite tune as standard ringtone). We investigate how audio can be used to convey richer information. In this demo we show how sonifications of SMS can be used to encode information about the sender’s identity as well as the content and intention of a message based on flexible, user-generated mappings. We present a platform that allows arbitrary mappings to be managed and apps to be connected in order to create a sonification of any message. Using a background app on Android, we show the utility of the approach for mobile devices.
@InProceedings{pfleging2012mobilehciadj,
author = {Bastian Pfleging and Florian Alt and Albrecht Schmidt},
booktitle = {{Adjunct Proceedings of the 14th Association for Computing Machinery SIGCHI's International Conference on Human-Computer Interaction with Mobile Devices and Services}},
title = {{Meaningful Melodies - Personal Sonification of Text Messages for Mobile Devices}},
year = {2012},
address = {San Francisco, CA, US},
month = {sep},
note = {pfleging2012mobilehciadj},
series = {MobileHCI'12},
abstract = {Mobile phones offer great potential for personalization. Besides apps and background images, ringtones are the major form of personalization. They are most often used to have a personal sound for incoming texts and calls. Furthermore, ringtones are used to identify the caller or sender of a message. In parts, this function is utilitarian (e.g., caller identification without looking at the phone) but it is also a form of self-expression (e.g., favorite tune as standard ringtone). We investigate how audio can be used to convey richer information. In this demo we show how sonifications of SMS can be used to encode information about the sender's identity as well as the content and intention of a message based on flexible, user-generated mappings. We present a platform that allows arbitrary mappings to be managed and apps to be connected in order to create a sonification of any message. Using a background app on Android, we show the utility of the approach for mobile devices.},
doi = {10.1145/2371664.2371706},
owner = {flo},
timestamp = {2012.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/pfleging2012mobilehciadj.pdf},
}
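The user-generated mappings described in this demo amount to composing sound fragments from message metadata. The following Python sketch is a hypothetical mapping in that spirit, not the actual platform: each sender owns an opening motif and each message intent adds a closing cadence; the tables and note names are invented.

# Invented mapping tables; the platform lets users define their own.
SENDER_MOTIF = {
    "alice": ["C4", "E4", "G4"],   # each contact gets a recognizable motif
    "bob":   ["A3", "C4", "E4"],
}
INTENT_CADENCE = {
    "question": ["B4", "D5"],      # rising ending = open question
    "info":     ["G4", "C4"],      # falling ending = plain information
}

def sonify(sender: str, intent: str) -> list[str]:
    """Compose sender motif + intent cadence into one short melody."""
    return (SENDER_MOTIF.get(sender, ["C4"])
            + INTENT_CADENCE.get(intent, ["C4"]))

print(sonify("alice", "question"))  # ['C4', 'E4', 'G4', 'B4', 'D5']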
S. Schneegass, F. Alt, and A. Schmidt. Mobile Interaction with Ads on Public Display Networks. In Proceedings of the 10th International Conference on Mobile Systems, Applications, and Services (MobiSys’12), 2012, p. 479–480.
[BibTeX] [Abstract] [PDF]
In public places we can observe that many conventional displays are replaced by digital displays, a lot of them networked. These displays mainly show advertising in a similar way to television’s commercial break, not exploiting the opportunities of the new medium [1]. Several approaches of interaction between mobile devices and public displays have been investigated over the last 15 years. In this demo we concentrate on challenges that are specific to public displays used for advertising. In particular we focus on how new approaches for interaction with content, means for content creation, and tools for follow-ups can be implemented based on mobile devices. With Digifieds we present a research system that has been used to explore different research questions and to showcase the potential of interactive advertising in public space.
@InProceedings{schneegass2012mobisysadj,
author = {Schneegass, S. and Alt, F. and Schmidt, A.},
booktitle = {{Proceedings of the 10th International Conference on Mobile Systems, Applications, and Services}},
title = {{Mobile Interaction with Ads on Public Display Networks}},
year = {2012},
note = {schneegass2012mobisysadj},
organization = {Association for Computing Machinery},
pages = {479--480},
series = {MobiSys'12},
abstract = {In public places we can observe that many conventional displays are replaced by digital displays, a lot of them networked. These displays mainly show advertising in a similar way to television's commercial break, not exploiting the opportunities of the new medium [1]. Several approaches of interaction between mobile devices and public displays have been investigated over the last 15 years. In this demo we concentrate on challenges that are specific to public displays used for advertising. In particular we focus on how new approaches for interaction with content, means for content creation, and tools for follow-ups can be implemented based on mobile devices. With Digifieds we present a research system that has been used to explore different research questions and to showcase the potential of interactive advertising in public space.},
timestamp = {2012.06.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schneegass2012mobisysadj.pdf},
}
F. Alt, S. Schneegass, A. Schmidt, J. Müller, and N. Memarovic. How to Evaluate Public Displays. In Proceedings of the 2012 International Symposium on Pervasive Displays (PerDis’12), Association for Computing Machinery, New York, NY, USA, 2012, p. 171–176. doi:10.1145/2307798.2307815
[BibTeX] [Abstract] [PDF]
After years in the lab, interactive public displays are finding their way into public spaces, shop windows, and public institutions. They are equipped with a multitude of sensors as well as (multi-) touch surfaces allowing not only the audience to be sensed, but also their effectiveness to be measured. The lack of generally accepted design guidelines for public displays and the fact that there are many different objectives (e.g., increasing attention, optimizing interaction times, finding the best interaction technique) make it a challenging task to pick the most suitable evaluation method. Based on a literature survey and our own experiences, this paper provides an overview of study types, paradigms, and methods for evaluation both in the lab and in the real world. Following a discussion of design challenges, we provide a set of guidelines for researchers and practitioners alike to be applied when evaluating public displays.
@InProceedings{alt2012perdis,
author = {Alt, Florian and Schneegass, Stefan and Schmidt, Albrecht and M\"{u}ller, J\"{o}rg and Memarovic, Nemanja},
booktitle = {{Proceedings of the 2012 International Symposium on Pervasive Displays}},
title = {{How to Evaluate Public Displays}},
year = {2012},
address = {New York, NY, USA},
month = {jun},
note = {alt2012perdis},
pages = {171--176},
publisher = {Association for Computing Machinery},
series = {PerDis'12},
abstract = {After years in the lab, interactive public displays are finding their way into public spaces, shop windows, and public institutions. They are equipped with a multitude of sensors as well as (multi-) touch surfaces allowing not only the audience to be sensed, but also their effectiveness to be measured. The lack of generally accepted design guidelines for public displays and the fact that there are many different objectives (e.g., increasing attention, optimizing interaction times, finding the best interaction technique) make it a challenging task to pick the most suitable evaluation method. Based on a literature survey and our own experiences, this paper provides an overview of study types, paradigms, and methods for evaluation both in the lab and in the real world. Following a discussion of design challenges, we provide a set of guidelines for researchers and practitioners alike to be applied when evaluating public displays.},
acmid = {2307815},
articleno = {17},
doi = {10.1145/2307798.2307815},
isbn = {978-1-4503-1414-5},
keywords = {digital signage, evaluation, methods, public displays},
location = {Porto, Portugal},
numpages = {6},
timestamp = {2012.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012perdis.pdf},
}
N. Memarovic, M. Langheinrich, and F. Alt. The Interacting Places Framework: Conceptualizing Public Display Applications that Promote Community Interaction and Place Awareness. In Proceedings of the 2012 international symposium on pervasive displays (PerDis’12), Association for Computing Machinery, New York, NY, USA, 2012, p. 71–76. doi:10.1145/2307798.2307805
[BibTeX] [Abstract] [PDF]
The proliferation of public displays, along with ubiquitous wireless communication and sensing technology, has made it possible to create a novel public communication medium: open networked pervasive displays would allow citizens to provide their own content, appropriate close-by displays, and increase their own awareness of a display’s surroundings and its local communities. We envision that such displays can create interacting places, i. e., public spaces that promote community interaction and place awareness. In this paper we describe our Interacting Places Framework (IPF), a conceptual framework for designing applications in this novel research space that we developed based on four distinct public display studies. Our IPF focuses on 4 elements: 1) content providers, i. e., entities that will supply content; 2) content viewers, i. e., people who are addressed by the content; 3) communication channels that deliver the content and range from inclusive, i. e., open-for-everyone, to exclusive, i. e., closed-group channels; and 4) an awareness diffusion layer that describes how community awareness building happens both explicitly, i. e., through content tailored towards a specific audience, and implicitly, by observing output for other people.
@InProceedings{memarovic2012perdis,
author = {Memarovic, Nemanja and Langheinrich, Marc and Alt, Florian},
booktitle = {Proceedings of the 2012 International Symposium on Pervasive Displays},
title = {{The Interacting Places Framework: Conceptualizing Public Display Applications that Promote Community Interaction and Place Awareness}},
year = {2012},
address = {New York, NY, USA},
month = {jun},
note = {memarovic2012perdis},
pages = {71--76},
publisher = {Association for Computing Machinery},
series = {PerDis'12},
abstract = {The proliferation of public displays, along with ubiquitous wireless communication and sensing technology, has made it possible to create a novel public communication medium: open networked pervasive displays would allow citizens to provide their own content, appropriate close-by displays, and increase their own awareness of a display's surroundings and its local communities. We envision that such displays can create interacting places, i. e., public spaces that promote community interaction and place awareness. In this paper we describe our Interacting Places Framework (IPF), a conceptual framework for designing applications in this novel research space that we developed based on four distinct public display studies. Our IPF focuses on 4 elements: 1) content providers, i. e., entities that will supply content; 2) content viewers, i. e., people who are addressed by the content; 3) communication channels that deliver the content and range from inclusive, i. e., open-for-everyone, to exclusive, i. e., closed-group channels; and 4) an awareness diffusion layer that describes how community awareness building happens both explicitly, i. e., through content tailored towards a specific audience, and implicitly, by observing output for other people.},
acmid = {2307805},
articleno = {7},
doi = {10.1145/2307798.2307805},
isbn = {978-1-4503-1414-5},
keywords = {community interaction, interacting places, public displays, urban Computing, urban informatics},
location = {Porto, Portugal},
numpages = {6},
timestamp = {2012.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/memarovic2012perdis.pdf},
}
F. Alt and S. Schneegass. A Conceptual Architecture for Pervasive Advertising in Public Display Networks. In Proceedings of the 3rd Workshop on Infrastructure and Design Challenges of Coupled Display Visual Interfaces (PPD’12), 2012.
[BibTeX] [Abstract] [PDF]
This paper presents a conceptual architecture for pervasive advertising on public displays. It can help researchers and practitioners to inform the design of future display networks. Due to falling hardware prices we see a strong proliferation of (public) places with displays, and it is not only large outdoor advertisers anymore operating them. However, public displays currently fail to attract the attention of the user – a challenge that could be overcome by networking displays and deploying sensors that allow novel interaction techniques and engaging user experiences to be created. One major question is how to design an appropriate infrastructure that caters to the conflicting needs of the involved stakeholders. Users want interesting content and their privacy respected, advertisers want to gather the user’s data, and display owners want to be in control of the content as they fund the infrastructure. We identify the core components and discuss how control can be appropriately distributed among stakeholders by presenting three different forms of the architecture (user-centered, advertiser-centered, trusted).
@InProceedings{alt2012ppd,
author = {Florian Alt AND Stefan Schneegass},
booktitle = {{Proceedings of the 3rd Workshop on Infrastructure and Design Challenges of Coupled Display Visual Interfaces}},
title = {{A Conceptual Architecture for Pervasive Advertising in Public Display Networks}},
year = {2012},
month = jun,
note = {alt2012ppd},
series = {PPD'12},
abstract = {This paper presents a conceptual architecture for pervasive advertising on public displays. It can help researchers and practitioners to inform the design of future display networks. Due to falling hardware prices we see a strong proliferation of (public) places with displays, and it is not only large outdoor advertisers anymore operating them. However, public displays currently fail to attract the attention of the user – a challenge that could be overcome by networking displays and deploying sensors that allow novel interaction techniques and engaging user experiences to be created. One major question is how to design an appropriate infrastructure that caters to the conflicting needs of the involved stakeholders. Users want interesting content and their privacy respected, advertisers want to gather the user’s data, and display owners want to be in control of the content as they fund the infrastructure. We identify the core components and discuss how control can be appropriately distributed among stakeholders by presenting three different forms of the architecture (user-centered, advertiser-centered, trusted).},
timestamp = {2012.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012ppd.pdf},
}
F. Alt, D. Michelis, and J. Müller, “Pervasive Advertising Technologies,” in Media cultures, S. Pop, U. Stalder, G. Tscherteu, and M. Struppek, Eds., Av Edition, 2012, p. 121–128.
[BibTeX] [PDF]
@InBook{alt2012mediacultures2e,
author = {Florian Alt AND Daniel Michelis AND J\"{o}rg M\"{u}ller},
chapter = {{Pervasive Advertising Technologies}},
editor = {Susa Pop AND Ursula Stalder AND Gernot Tscherteu AND Mirjam Struppek},
pages = {121--128},
publisher = {Av Edition},
title = {{Pervasive Advertising Technologies}},
year = {2012},
month = {may},
note = {alt2012mediacultures2e},
booktitle = {Media Cultures},
owner = {flo},
timestamp = {2012.05.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012mediacultures2e.pdf},
}
F. Alt, D. Michelis, and J. Müller, “Pervasive Advertising – Technologien, Konzepte, Herausforderungen,” in Media cultures, S. Pop, U. Stalder, G. Tscherteu, and M. Struppek, Eds., Av Edition, 2012, p. 331–338.
[BibTeX] [PDF]
@InBook{alt2012mediacultures2d,
author = {Florian Alt AND Daniel Michelis AND J\"{o}rg M\"{u}ller},
chapter = {{Pervasive Advertising -- Technologien, Konzepte, Herausforderungen}},
editor = {Susa Pop AND Ursula Stalder AND Gernot Tscherteu AND Mirjam Struppek},
pages = {331--338},
publisher = {Av Edition},
title = {{Pervasive Advertising -- Technologien, Konzepte, Herausforderungen}},
year = {2012},
month = {may},
note = {alt2012mediacultures2d},
booktitle = {Media Cultures},
owner = {flo},
timestamp = {2012.05.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012mediacultures2d.pdf},
}
F. Alt, “Digitale Schwarze Bretter,” in Media cultures, S. Pop, U. Stalder, G. Tscherteu, and M. Struppek, Eds., Av Edition, 2012, p. 317–321.
[BibTeX] [PDF]
@InBook{alt2012mediacultures1d,
author = {Florian Alt},
chapter = {{Digitale Schwarze Bretter}},
editor = {Susa Pop AND Ursula Stalder AND Gernot Tscherteu AND Mirjam Struppek},
pages = {317--321},
publisher = {Av Edition},
title = {{Digitale Schwarze Bretter}},
year = {2012},
month = {may},
note = {alt2012mediacultures1d},
booktitle = {Media Cultures},
owner = {flo},
timestamp = {2012.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012mediacultures1d.pdf},
}
F. Alt, “Digital Black Boards,” in Media cultures, S. Pop, U. Stalder, G. Tscherteu, and M. Struppek, Eds., Av Edition, 2012, p. 117–120.
[BibTeX] [PDF]
@InBook{alt2012mediacultures1e,
author = {Florian Alt},
chapter = {{Digital Black Boards (english)}},
editor = {Susa Pop AND Ursula Stalder AND Gernot Tscherteu AND Mirjam Struppek},
pages = {117--120},
publisher = {Av Edition},
title = {{Digital Black Boards}},
year = {2012},
month = {may},
note = {alt2012mediacultures1e},
booktitle = {Media Cultures},
owner = {flo},
timestamp = {2012.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012mediacultures1e.pdf},
}
J. Müller, R. Walter, G. Bailly, M. Nischt, and F. Alt. Looking Glass: A Field Study on Noticing Interactivity of a Shop Window (Video). In Adjunct Proceedings of the 2012 Association for Computing Machinery Conference on Human Factors in Computing Systems (CHI’12), Association for Computing Machinery, New York, NY, USA, 2012, p. 297–306. doi:10.1145/2207676.2207718
[BibTeX] [PDF]
@InProceedings{mueller2012chivideo,
author = {M\"{u}ller, J\"{o}rg and Walter, Robert and Bailly, Gilles and Nischt, Michael and Alt, Florian},
booktitle = {{Adjunct Proceedings of the 2012 Association for Computing Machinery Conference on Human Factors in Computing Systems}},
title = {{Looking Glass: A Field Study on Noticing Interactivity of a Shop Window (Video)}},
year = {2012},
address = {New York, NY, USA},
month = {apr},
pages = {297--306},
publisher = {Association for Computing Machinery},
series = {CHI'12},
acmid = {2207718},
doi = {10.1145/2207676.2207718},
isbn = {978-1-4503-1015-4},
keywords = {interactivity, noticing interactivity, public displays, User representation},
location = {Austin, Texas, USA},
numpages = {10},
timestamp = {2012.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mueller2012chi.pdf},
}
F. Alt, J. Müller, and A. Schmidt. Advertising on Public Display Networks. IEEE Computer, vol. 45, iss. 5, pp. 50-56, 2012. doi:10.1109/MC.2012.150
[BibTeX] [Abstract] [PDF]
For advertising-based public display networks to become truly pervasive, they must provide a tangible social benefit and be engaging without being obtrusive, blending advertisements with informative content.
@Article{alt2012computer,
author = {Florian Alt and J{\"o}rg M{\"u}ller and Albrecht Schmidt},
journal = {{IEEE Computer}},
title = {{Advertising on Public Display Networks}},
year = {2012},
month = {may},
note = {alt2012computer},
number = {5},
pages = {50-56},
volume = {45},
abstract = {For advertising-based public display networks to become truly pervasive, they must provide a tangible social benefit and be engaging without being obtrusive, blending advertisements with informative content.},
bibsource = {DBLP, http://dblp.uni-trier.de},
doi = {10.1109/MC.2012.150},
ee = {http://doi.ieeecomputersociety.org/10.1109/MC.2012.150},
timestamp = {2012.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012computer.pdf},
}
J. Müller, R. Walter, G. Bailly, M. Nischt, and F. Alt. Looking Glass: A Field Study on Noticing Interactivity of a Shop Window. In Proceedings of the 2012 Association for Computing Machinery Conference on Human Factors in Computing Systems (CHI’12), Association for Computing Machinery, New York, NY, USA, 2012, p. 297–306. doi:10.1145/2207676.2207718
[BibTeX] [Abstract] [PDF]
In this paper we present our findings from a lab and a field study investigating how passers-by notice the interactivity of public displays. We designed an interactive installation that uses visual feedback to the incidental movements of passers-by to communicate its interactivity. The lab study reveals: (1) Mirrored user silhouettes and images are more effective than avatar-like representations. (2) It takes time to notice the interactivity (approx. 1.2s). In the field study, three displays were installed during three weeks in shop windows, and data about 502 interaction sessions were collected. Our observations show: (1) Significantly more passers-by interact when immediately showing the mirrored user image (+90%) or silhouette (+47%) compared to a traditional attract sequence with call-to-action. (2) Passers-by often notice interactivity late and have to walk back to interact (the landing effect). (3) If somebody is already interacting, others begin interaction behind the ones already interacting, forming multiple rows (the honeypot effect). Our findings can be used to design public display applications and shop windows that more effectively communicate interactivity to passers-by.
@InProceedings{mueller2012chi,
author = {M\"{u}ller, J\"{o}rg and Walter, Robert and Bailly, Gilles and Nischt, Michael and Alt, Florian},
booktitle = {{Proceedings of the 2012 Association for Computing Machinery Conference on Human Factors in Computing Systems}},
title = {{Looking Glass: A Field Study on Noticing Interactivity of a Shop Window}},
year = {2012},
address = {New York, NY, USA},
month = {apr},
note = {mueller2012chi},
pages = {297--306},
publisher = {Association for Computing Machinery},
series = {CHI'12},
abstract = {In this paper we present our findings from a lab and a field study investigating how passers-by notice the interactivity of public displays. We designed an interactive installation that uses visual feedback to the incidental movements of passers-by to communicate its interactivity. The lab study reveals: (1) Mirrored user silhouettes and images are more effective than avatar-like representations. (2) It takes time to notice the interactivity (approx. 1.2s). In the field study, three displays were installed during three weeks in shop windows, and data about 502 interaction sessions were collected. Our observations show: (1) Significantly more passers-by interact when immediately showing the mirrored user image (+90%) or silhouette (+47%) compared to a traditional attract sequence with call-to-action. (2) Passers-by often notice interactivity late and have to walk back to interact (the landing effect). (3) If somebody is already interacting, others begin interaction behind the ones already interacting, forming multiple rows (the honeypot effect). Our findings can be used to design public display applications and shop windows that more effectively communicate interactivity to passers-by.},
acmid = {2207718},
doi = {10.1145/2207676.2207718},
isbn = {978-1-4503-1015-4},
keywords = {interactivity, noticing interactivity, public displays, User representation},
location = {Austin, Texas, USA},
numpages = {10},
owner = {flo},
timestamp = {2012.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mueller2012chi.pdf},
}
A. Bulling, F. Alt, and A. Schmidt. Increasing The Security Of Gaze-Based Cued-Recall Graphical Passwords Using Saliency Masks. In Proceedings of the 2012 Association for Computing Machinery Annual Conference on Human Factors in Computing Systems (CHI’12), Association for Computing Machinery, New York, NY, USA, 2012, p. 3011–3020. doi:10.1145/2207676.2208712
[BibTeX] [Abstract] [PDF]
With computers being used ever more ubiquitously in situations where privacy is important, secure user authentication is a central requirement. Gaze-based graphical passwords are a particularly promising means for shoulder-surfing-resistant authentication, but selecting secure passwords remains challenging. In this paper, we present a novel gaze-based authentication scheme that makes use of cued-recall graphical passwords on a single image. In order to increase password security, our approach uses a computational model of visual attention to mask those areas of the image that are most likely to attract visual attention. We create a realistic threat model for attacks that may occur in public settings, such as filming the user’s interaction while drawing money from an ATM. Based on a 12-participant user study, we show that our approach is significantly more secure than a standard image-based authentication and gaze-based 4-digit PIN entry.
@InProceedings{bulling2012chi,
author = {Bulling, Andreas and Alt, Florian and Schmidt, Albrecht},
booktitle = {{Proceedings of the 2012 Association for Computing Machinery Annual Conference on Human Factors in Computing Systems}},
title = {{Increasing The Security Of Gaze-Based Cued-Recall Graphical Passwords Using Saliency Masks}},
year = {2012},
address = {New York, NY, USA},
month = {apr},
note = {bulling2012chi},
pages = {3011--3020},
publisher = {Association for Computing Machinery},
series = {CHI'12},
abstract = {With computers being used ever more ubiquitously in situations where privacy is important, secure user authentication is a central requirement. Gaze-based graphical passwords are a particularly promising means for shoulder-surfing-resistant authentication, but selecting secure passwords remains challenging. In this paper, we present a novel gaze-based authentication scheme that makes use of cued-recall graphical passwords on a single image. In order to increase password security, our approach uses a computational model of visual attention to mask those areas of the image that are most likely to attract visual attention. We create a realistic threat model for attacks that may occur in public settings, such as filming the user's interaction while drawing money from an ATM. Based on a 12-participant user study, we show that our approach is significantly more secure than a standard image-based authentication and gaze-based 4-digit PIN entry.},
acmid = {2208712},
doi = {10.1145/2207676.2208712},
isbn = {978-1-4503-1015-4},
keywords = {cued-recall graphical passwords, eye tracking, gaze-based, saliency masks, User authentication},
location = {Austin, Texas, USA},
numpages = {10},
timestamp = {2012.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/bulling2012chi.pdf},
}
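The key idea of this paper is to withhold the most visually salient image regions from password point selection. The NumPy sketch below substitutes a crude local-contrast measure for the computational attention model the paper actually uses; the blur size, quantile, and masking value are illustrative assumptions.

import numpy as np

def box_blur(img: np.ndarray, k: int = 15) -> np.ndarray:
    """Separable box blur via prefix sums (k odd, grayscale input)."""
    pad = k // 2
    p = np.pad(img.astype(float), pad, mode="edge")
    c = np.vstack([np.zeros((1, p.shape[1])), p.cumsum(axis=0)])
    p = (c[k:] - c[:-k]) / k                  # vertical pass
    c = np.hstack([np.zeros((p.shape[0], 1)), p.cumsum(axis=1)])
    return (c[:, k:] - c[:, :-k]) / k         # horizontal pass

def mask_salient(img: np.ndarray, keep: float = 0.85) -> np.ndarray:
    """Flatten the (1 - keep) most 'salient' pixels to the image mean.
    Local contrast is a toy proxy for a real visual-attention model."""
    saliency = np.abs(img - box_blur(img))
    masked = img.copy()
    masked[saliency > np.quantile(saliency, keep)] = img.mean()
    return masked

rng = np.random.default_rng(0)
img = rng.random((64, 64))
img[20:30, 20:30] = 1.0          # a bright patch that would attract gaze
out = mask_salient(img)          # the patch's high-contrast edges are flattened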
N. Memarovic, M. Langheinrich, and F. Alt. Interacting places – A framework for promoting community interaction and place awareness through public displays. In 2012 IEEE International Conference on Pervasive Computing and Communications Workshops, 2012, pp. 327-430. doi:10.1109/PerComW.2012.6197526
[BibTeX] [Abstract] [PDF]
The proliferation of public displays, along with ubiquitous wireless communication and sensing technology, has made it possible to create a novel public communication medium: open networked pervasive displays would allow citizens to provide their own content, appropriate close-by displays, and increase their own awareness of a display’s surroundings and its local communities. We envision that such displays ultimately can create interacting places, i.e., public spaces that promote community interaction and place awareness. In this paper we describe our Interacting Places Framework (IPF), which helps to identify challenges and opportunities in this novel research space. Our IPF has 4 elements: 1) content providers, i.e., entities that supply content; 2) content viewers, i.e., people who consume the content; 3) a number of interacting places communication channels that support inclusive, i.e., open-for-everyone, and exclusive, i.e., closed-group communication; and 4) an awareness diffusion layer that promotes community interaction either explicitly, i.e., through content tailored towards a specific audience, or implicitly, by observing output for other people. We have begun initial deployments examining this space and will use the framework presented here to analyze future results.
@InProceedings{memarovic2012percomadj,
author = {N. Memarovic and M. Langheinrich and F. Alt},
booktitle = {{2012 IEEE International Conference on Pervasive Computing and Communications Workshops}},
title = {{Interacting places - A framework for promoting community interaction and place awareness through public displays}},
year = {2012},
month = {March},
note = {memarovic2012percomadj},
pages = {327-430},
abstract = {The proliferation of public displays, along with ubiquitous wireless communication and sensing technology, has made it possible to create a novel public communication medium: open networked pervasive displays would allow citizens to provide their own content, appropriate close-by displays, and increase their own awareness of a display’s surroundings and its local communities. We envision that such displays ultimately can create interacting places, i.e., public spaces that promote community interaction and place awareness. In this paper we describe our Interacting Places Framework (IPF), which helps to identify challenges and opportunities in this novel research space. Our IPF has 4 elements: 1) content providers, i.e., entities that supply content; 2) content viewers, i.e., people who consume the content; 3) a number of interacting places communication channels that support inclusive, i.e., open-for-everyone, and exclusive, i.e., closed-group communication; and 4) an awareness diffusion layer that promotes community interaction either explicitly, i.e., through content tailored towards a specific audience, or implicitly, by observing output for other people. We have begun initial deployments examining this space and will use the framework presented here to analyze future results.},
doi = {10.1109/PerComW.2012.6197526},
keywords = {liquid crystal displays;mobile computing;public utilities;social sciences;wireless sensor networks;public displays;ubiquitous wireless communication;public communication medium;open networked pervasive displays;close-by displays;local communities;public spaces;community interaction;place awareness;content providers;content viewers;interacting place communication channel;open-for-everyone communication channel;exclusive communication channel;inclusive communication channel;closed-group communication channel;awareness diffusion layer;wireless sensing technology;Communities;Communication channels;Mobile handsets;Presses;Instruments;Educational institutions;Cities and towns;community interaction;interacting places;public displays;urban computing;urban informatics},
timestamp = {2012.04.17},
url = {http://www.florian-alt.org/unibw/wp-content/publications/memarovic2012percomadj.pdf},
}
A. Schmidt, B. Pfleging, F. Alt, A. Sahami, and G. Fitzpatrick. Interacting with 21st Century Computers. IEEE Pervasive Computing, vol. 11, iss. 1, pp. 22-31, 2012. doi:10.1109/MPRV.2011.81
[BibTeX] [Abstract] [PDF]
This paper reflects on four themes from Weiser’s original vision from a human-computer interaction perspective: computing everywhere, personal computing, the social dimension of computing, and privacy. The authors review developments both in accordance with and in contrast to this vision.
@Article{schmidt2012pervasivecomputing,
author = {A. Schmidt and B. Pfleging and F. Alt and A. Sahami and G. Fitzpatrick},
journal = {{IEEE Pervasive Computing}},
title = {{Interacting with 21st Century Computers}},
year = {2012},
issn = {1536-1268},
month = {January},
note = {schmidt2012pervasivecomputing},
number = {1},
pages = {22-31},
volume = {11},
abstract = {This paper reflects on four themes from Weiser's original vision from a human-computer interaction perspective: computing everywhere, personal computing, the social dimension of computing, and privacy. The authors review developments both in accordance with and in contrast to this vision.},
doi = {10.1109/MPRV.2011.81},
keywords = {data privacy;human computer interaction;social aspects of automation;human-computer interaction perspective;Weiser perspective;computing everywhere perspective;personal computing perspective;social dimension perspective;privacy implication;Pervasive computing;User/Machine Systems;User Interfaces;Multimedia Information Systems;Evolutionary prototyping;Human Factors in Software Design;User interfaces.},
timestamp = {2012.04.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schmidt2012pervasivecomputing.pdf},
}

2011

T. Ning, J. Müller, R. Walter, G. Bailly, C. Wacharamanotham, J. Borchers, and F. Alt. No Need To Stop: Menu Techniques for Passing by Public Displays. In Proceedings of the CHI Workshop on Large Displays in Urban Life, Vancouver, BC, Canada, 2011.
[BibTeX] [Abstract] [PDF]
Although public displays are increasingly prevalent in public spaces, they are generally not interactive. Menu techniques can enable users to select what is interesting to them. Current touch screen techniques are unsuitable, because for many public displays, users merely pass by and rarely stop. We investigate command selection in this new context of passing-by interaction, in which users only have a few seconds to interact. We present six hands-free gestural techniques and evaluate them in a Wizard-of-Oz experiment. Based on the results of this study, we provide design recommendations for menu selection in passing-by situations.
@InProceedings{ning2011ldul,
author = {Tongyan Ning AND J\"{o}rg M\"{u}ller AND Robert Walter AND Gilles Bailly AND Chatchavan Wacharamanotham AND Jan Borchers AND Florian Alt},
booktitle = {{Proceedings of the CHI Workshop on Large Displays in Urban Life}},
title = {{No Need To Stop: Menu Techniques for Passing by Public Displays}},
year = {2011},
address = {Vancouver, BC, Canada},
month = {apr},
note = {ning2011ldul},
abstract = {Although public displays are increasingly prevalent in public spaces, they are generally not interactive. Menu techniques can enable users to select what is interesting to them. Current touch screen techniques are unsuitable, because for many public displays, users merely pass by and rarely stop. We investigate command selection in this new context of passing-by interaction, in which users only have a few seconds to interact. We present six hands-free gestural techniques and evaluate them in a Wizard-of-Oz experiment. Based on the results of this study, we provide design recommendations for menu selection in passing-by situations.},
owner = {flo},
timestamp = {2012.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/ning2011ldul.pdf},
}
F. Alt, A. Bungert, B. Pfleging, A. Schmidt, and M. Havemann. Supporting Children With Special Needs Through Multi-Perspective Behavior Analysis. In Proceedings of the Tenth International Conference on Mobile and Ubiquitous Multimedia (MUM’11), Association for Computing Machinery, New York, NY, USA, 2011, p. 81–84. doi:10.1145/2107596.2107605
[BibTeX] [Abstract] [PDF]
In past years, ubiquitous computing technologies have been successfully deployed for supporting children with special needs. One focus of current research has been on post-hoc behavior analysis based on video footage where one or multiple cameras were used to review situations in which children behaved in a certain way. As miniaturized cameras as well as portable devices are becoming available at low costs, we envision a new quality in supporting the diagnosis, observation, and education of children with special needs. In contrast to existing approaches that use cameras in fixed locations, we suggest using multiple mobile camera perspectives. In this way observation data from fellow classmates, teachers, and caregivers can be considered, even in highly dynamic outdoor situations. In this paper we present MuPerBeAn, a platform that allows multi-perspective video footage from mobile cameras to be collected, synchronously reviewed, and annotated. We report on interviews with caregivers and parents and present a qualitative study based on two scenarios involving a total of seven children with autism (CWA). Our findings show that observing multiple mobile perspectives can help children as well as teachers to better reflect on situations, particularly during education.
@InProceedings{alt2011mum2,
author = {Alt, Florian and Bungert, Andreas and Pfleging, Bastian and Schmidt, Albrecht and Havemann, Meindert},
booktitle = {{Proceedings of the Tenth International Conference on Mobile and Ubiquitous Multimedia}},
title = {{Supporting Children With Special Needs Through Multi-Perspective Behavior Analysis}},
year = {2011},
address = {New York, NY, USA},
month = {dec},
note = {alt2011mum2},
pages = {81--84},
publisher = {Association for Computing Machinery},
series = {MUM'11},
abstract = {In past years, ubiquitous computing technologies have been successfully deployed for supporting children with special needs. One focus of current research has been on post-hoc behavior analysis based on video footage where one or multiple cameras were used to review situations in which children behaved in a certain way. As miniaturized cameras as well as portable devices are becoming available at low costs, we envision a new quality in supporting the diagnosis, observation, and education of children with special needs. In contrast to existing approaches that use cameras in fixed locations, we suggest using multiple mobile camera perspectives. In this way observation data from fellow classmates, teachers, and caregivers can be considered, even in highly dynamic outdoor situations. In this paper we present MuPerBeAn, a platform that allows multi-perspective video footage from mobile cameras to be collected, synchronously reviewed, and annotated. We report on interviews with caregivers and parents and present a qualitative study based on two scenarios involving a total of seven children with autism (CWA). Our findings show that observing multiple mobile perspectives can help children as well as teachers to better reflect on situations, particularly during education.},
acmid = {2107605},
doi = {10.1145/2107596.2107605},
isbn = {978-1-4503-1096-3},
keywords = {autism, cameras, mobile devices, ubiquitous computing},
location = {Beijing, China},
numpages = {4},
timestamp = {2011.12.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2011mum2.pdf},
}
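Synchronous review of footage from several mobile cameras reduces, at its core, to mapping camera-local timestamps onto one shared timeline. A minimal Python sketch of that alignment step follows; the camera names, start offsets, and annotations are invented, since the MuPerBeAn platform itself is not described at code level.

from dataclasses import dataclass

@dataclass
class Annotation:
    camera: str
    local_t: float   # seconds into that camera's own recording
    label: str

# Hypothetical recording start times, in seconds on a shared clock.
CAMERA_START = {"teacher": 12.0, "classmate": 0.0, "caregiver": 4.5}

def merged_timeline(notes: list[Annotation]) -> list[tuple[float, str, str]]:
    """Order annotations from all perspectives on the shared clock."""
    return sorted((CAMERA_START[n.camera] + n.local_t, n.camera, n.label)
                  for n in notes)

notes = [Annotation("teacher", 3.0, "child covers ears"),
         Annotation("classmate", 16.2, "loud noise in hallway")]
for t, cam, label in merged_timeline(notes):
    print(f"{t:5.1f}s [{cam}] {label}")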
F. Alt, T. Kubitza, D. Bial, F. Zaidan, M. Ortel, B. Zurmaar, T. Lewen, A. S. Shirazi, and A. Schmidt. Digifieds: Insights into Deploying Digital Public Notice Areas in the Wild. In Proceedings of the 10th International Conference on Mobile and Ubiquitous Multimedia (MUM ’11), Association for Computing Machinery, New York, NY, USA, 2011, p. 165–174. doi:10.1145/2107596.2107618
[BibTeX] [Abstract] [PDF]
Traditional public notice areas (PNAs) are nowadays a popular means to publicly exchange information and reach people of a local community. The high usability led to a wide-spread use in stores, cafes, supermarkets, and public institutions. With public displays permeating public spaces and with display providers and owners being willing to share parts of their display space we envision traditional PNAs to be complemented or even replaced by their digital counterparts in the future, hence contributing to making public displays a novel communication medium. In this paper we report on the design and development of Digifieds (derived from digital classified), a digital public notice area. We deployed and evaluated Digifieds in an urban environment in the context of the UbiChallenge 2011 in Oulu, Finland over the course of 6 months. The deployment allowed the users’ view to be studied with regard to the envisioned content, preferred interaction techniques, as well as privacy concerns, and to compare them against traditional PNAs.
@InProceedings{alt2011mum1,
author = {Alt, Florian and Kubitza, Thomas and Bial, Dominik and Zaidan, Firas and Ortel, Markus and Zurmaar, Bj\"{o}rn and Lewen, Tim and Shirazi, Alireza Sahami and Schmidt, Albrecht},
booktitle = {{Proceedings of the 10th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{Digifieds: Insights into Deploying Digital Public Notice Areas in the Wild}},
year = {2011},
address = {New York, NY, USA},
note = {alt2011mum1},
pages = {165--174},
publisher = {Association for Computing Machinery},
series = {MUM '11},
abstract = {Traditional public notice areas (PNAs) are nowadays a popular means to publicly exchange information and reach people of a local community. The high usability led to a wide-spread use in stores, cafes, supermarkets, and public institutions. With public displays permeating public spaces and with display providers and owners being willing to share parts of their display space we envision traditional PNAs to be complemented or even replaced by their digital counterparts in the future, hence contributing to making public displays a novel communication medium. In this paper we report on the design and development of Digifieds (derived from digital classified), a digital public notice area. We deployed and evaluated Digifieds in an urban environment in the context of the UbiChallenge 2011 in Oulu, Finland over the course of 6 months. The deployment allowed the users' view to be studied with regard to the envisioned content, preferred interaction techniques, as well as privacy concerns, and to compare them against traditional PNAs.},
acmid = {2107618},
doi = {10.1145/2107596.2107618},
isbn = {978-1-4503-1096-3},
keywords = {classifieds, digifieds, interaction, public displays, urban computing},
location = {Beijing, China},
numpages = {10},
timestamp = {2011.12.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2011mum1.pdf},
}
J. Müller, F. Alt, and D. Michelis, Pervasive Advertising, Springer London Limited, 2011.
[BibTeX] [Abstract] [PDF]
As pervasive computing technologies leave the labs, they are starting to be used for the purpose of advertising. Pervasive Advertising has the potential to affect everyone’s life, but it seems that a knowledge gap is preventing us from shaping this development in a meaningful way. In particular, many marketing and advertising professionals have an expert understanding of their trade, but are unaware of recent advances in pervasive computing technologies, the opportunities they offer, and the challenges they pose. Similarly, many pervasive computing researchers and professionals are on top of the recent technological advances, but lack basic marketing and advertising expertise and therefore an understanding of how their technology can influence these fields. This book is intended to close this gap and provide the means to meaningfully shape the future of pervasive advertising.
@Book{mueller2011perad,
title = {{Pervasive Advertising}},
publisher = {{Springer London Limited}},
year = {2011},
author = {J{\"o}rg M{\"u}ller and Florian Alt and Daniel Michelis},
isbn = {978-0-85729-351-0},
note = {mueller2011perad},
abstract = {As pervasive computing technologies leave the labs, they are starting to be used for the purpose of advertising. Pervasive Advertising has the potential to affect everyone’s life, but it seems that a knowledge gap is preventing us from shaping this development in a meaningful way. In particular, many marketing and advertising professionals have an expert understanding of their trade, but are unaware of recent advances in pervasive computing technologies, the opportunities they offer, and the challenges they pose. Similarly, many pervasive computing researchers and professionals are on top of the recent technological advances, but lack basic marketing and advertising expertise and therefore an understanding of how their technology can influence these fields. This book is intended to close this gap and provide the means to meaningfully shape the future of pervasive advertising.},
bibsource = {DBLP, http://dblp.uni-trier.de},
comment = {978-0-85729-351-0},
ee = {http://dx.doi.org/10.1007/978-0-85729-352-7},
timestamp = {2011.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mueller2011perad.pdf},
}
J. Müller, F. Alt, and D. Michelis. Introduction to Pervasive Advertising. In Pervasive Advertising, Springer London Limited, 2011. doi:10.1007/978-0-85729-352-7_1
[BibTeX] [Abstract] [PDF]
As pervasive computing technologies leave the labs, they are starting to be used for the purpose of advertising. Pervasive Advertising has the potential to affect everyone’s life, but it seems that a knowledge gap is preventing us from shaping this development in a meaningful way. In particular, many marketing and advertising professionals have an expert understanding of their trade, but are unaware of recent advances in pervasive computing technologies, the opportunities they offer, and the challenges they pose. Similarly, many pervasive computing researchers and professionals are on top of the recent technological advances, but lack basic marketing and advertising expertise and therefore an understanding of how their technology can influence these fields. This book is intended to close this gap and provide the means to meaningfully shape the future of pervasive advertising.
@InProceedings{mueller2011perad-intro,
author = {J\"{o}rg M\"{u}ller and Florian Alt and Daniel Michelis},
booktitle = {{Pervasive Advertising}},
title = {{Introduction to Pervasive Advertising}},
year = {2011},
editor = {J\"{o}rg M\"{u}ller and Florian Alt and Daniel Michelis},
month = {sep},
note = {mueller2011perad-intro},
publisher = {Springer London Limited},
abstract = {As pervasive computing technologies leave the labs, they are starting to be used for the purpose of advertising. Pervasive Advertising has the potential to affect everyone’s life, but it seems that a knowledge gap is preventing us from shaping this development in a meaningful way. In particular, many marketing and advertising professionals have an expert understanding of their trade, but are unaware of recent advances in pervasive computing technologies, the opportunities they offer, and the challenges they pose. Similarly, many pervasive computing researchers and professionals are on top of the recent technological advances, but lack basic marketing and advertising expertise and therefore an understanding of how their technology can influence these fields. This book is intended to close this gap and provide the means to meaningfully shape the future of pervasive advertising.},
comment = {978-0-85729-351-0},
doi = {10.1007/978-0-85729-352-7_1},
owner = {flo},
timestamp = {2011.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mueller2011perad-intro.pdf},
}
N. Memarovic, M. Langheinrich, and F. Alt. Connecting People through Content – Promoting Community Identity Cognition through People and Places. In Proceedings of Community Informatics, 2011.
[BibTeX] [Abstract] [PDF]
Large public screens are proliferating in public spaces. Today, most of them are standalone installations that display advertisements in the form of slides, short movies, or still images. However, it is not hard to imagine that these displays will soon be connected through the Internet, thus creating a global and powerful communication medium capable of providing rich, interactive applications. We believe that such a medium has the potential to foster connections within and between communities in public spaces. In this paper we present a research agenda for interacting places, i.e., public spaces that connect communities through public displays. We then report on our initial work in this space, in particular on using public displays for creating what we call identity cognition – increasing the sense of being connected between community members occupying the same space. We have investigated two options for achieving identity cognition: (a) through content that originates from the environment, and (b) through content that originates from people. Content originating from the environment portrays information about a display’s surrounding. For this type of content, identity cognition is usually being achieved implicitly by stimulating the effect of ‘triangulation’, an effect where particularities of the physical space act as links between people. Content originating from people, on the other hand, explicitly achieves identity cognition by promoting community values through content that expresses the attitudes, beliefs, and ideas of individual community members. We have built and deployed two public display applications that support identity cognition using environmentally-sourced content and people-sourced content, respectively.
@InProceedings{memarovic2011cirn,
author = {Nemanja Memarovic and Marc Langheinrich and Florian Alt},
booktitle = {{Proceedings of Community Informatics}},
title = {{Connecting People through Content - Promoting Community Identity Cognition through People and Places}},
year = {2011},
note = {memarovic2011cirn},
abstract = {Large public screens are proliferating in public spaces. Today, most of them are standalone installations that display advertisements in the form of slides, short movies, or still images. However, it is not hard to imagine that these displays will soon be connected through the Internet, thus creating a global and powerful communication medium capable of providing rich, interactive applications. We believe that such a medium has the potential to foster connections within and between communities in public spaces. In this paper we present a research agenda for interacting places, i.e., public spaces that connect communities through public displays. We then report on our initial work in this space, in particular on using public displays for creating what we call identity cognition – increasing the sense of being connected between community members occupying the same space. We have investigated two options for achieving identity cognition: (a) through content that originates from the environment, and (b) through content that originates from people. Content originating from the environment portrays information about a display’s surrounding. For this type of content, identity cognition is usually being achieved implicitly by stimulating the effect of ‘triangulation’, an effect where particularities of the physical space act as links between people. Content originating from people, on the other hand, explicitly achieves identity cognition by promoting community values through content that expresses the attitudes, beliefs, and ideas of individual community members. We have built and deployed two public display applications that support identity cognition using environmentally-sourced content and people-sourced content, respectively.},
location = {Prato, Italy},
timestamp = {2011.08.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/memarovic2011cirn.pdf},
}
F. Alt, N. Memarovic, I. Elhart, D. Bial, A. Schmidt, M. Langheinrich, G. Harboe, E. Huang, and M. P. Scipioni. Designing Shared Public Display Networks: Implications from Today’s Paper-based Notice Areas. In Proceedings of the Ninth International Conference on Pervasive Computing (Pervasive’11), Springer-Verlag, Berlin, Heidelberg, 2011, p. 258–275. doi:10.1007/978-3-642-21726-5_17
[BibTeX] [Abstract] [PDF]
Large public displays have become a regular conceptual element in many shops and businesses, where they advertise products or highlight upcoming events. In our work, we are interested in exploring how these isolated display solutions can be interconnected to form a single large network of public displays, thus supporting novel forms of sharing access to display real estate. In order to explore the feasibility of this vision, we investigated today’s practices surrounding shared notice areas, i.e. places where customers and visitors can put up event posters and classifieds, such as shop windows or notice boards. In particular, we looked at the content posted to such areas, the means for sharing it (i.e., forms of content control), and the reason for providing the shared notice area. Based on two-week long photo logs and a number of in-depth interviews with providers of such notice areas, we provide a systematic assessment of factors that inhibit or promote the shared use of public display space, ultimately leading to a set of concrete design implications for providing future digital versions of such public notice areas in the form of networked public displays.
@InProceedings{alt2011pervasive,
author = {Alt, Florian and Memarovic, Nemanja and Elhart, Ivan and Bial, Dominik and Schmidt, Albrecht and Langheinrich, Marc and Harboe, Gunnar and Huang, Elaine and Scipioni, Marcello P.},
booktitle = {{Proceedings of the Ninth International Conference on Pervasive Computing}},
title = {{Designing Shared Public Display Networks: Implications from Today's Paper-based Notice Areas}},
year = {2011},
address = {Berlin, Heidelberg},
month = {jun},
note = {alt2011pervasive},
pages = {258--275},
publisher = {Springer-Verlag},
series = {Pervasive'11},
abstract = {Large public displays have become a regular conceptual element in many shops and businesses, where they advertise products or highlight upcoming events. In our work, we are interested in exploring how these isolated display solutions can be interconnected to form a single large network of public displays, thus supporting novel forms of sharing access to display real estate. In order to explore the feasibility of this vision, we investigated today’s practices surrounding shared notice areas, i.e. places where customers and visitors can put up event posters and classifieds, such as shop windows or notice boards. In particular, we looked at the content posted to such areas, the means for sharing it (i.e., forms of content control), and the reason for providing the shared notice area. Based on two-week long photo logs and a number of in-depth interviews with providers of such notice areas, we provide a systematic assessment of factors that inhibit or promote the shared use of public display space, ultimately leading to a set of concrete design implications for providing future digital versions of such public notice areas in the form of networked public displays.},
acmid = {2021999},
doi = {10.1007/978-3-642-21726-5_17},
isbn = {978-3-642-21725-8},
keywords = {advertising, observation, public display},
location = {San Francisco, USA},
numpages = {18},
timestamp = {2011.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2011pervasive.pdf},
}
F. Alt, D. Bial, T. Kubitza, A. S. Shirazi, M. Ortel, B. Zurmaar, F. Zaidan, T. Lewen, and A. Schmidt. Digifieds: Evaluating Suitable Interaction Techniques for Shared Public Notice Areas. In Adjunct Proceedings of the Ninth International Conference on Pervasive Computing (Pervasive’11), San Francisco, CA, USA, 2011.
[BibTeX] [Abstract] [PDF]
Public notice areas are nowadays being widely used in stores, restaurants, cafes and public institutions by customers and visitors to sell or advertise products and upcoming events. Although web platforms such as Craigslist or eBay offer similar services, traditional notice areas are highly popular as using pen and paper poses only a minimal barrier to share content. With public displays proliferating the public space and with means to network these displays, novel opportunities arise as to how information can be managed and shared. In an initial step we systematically assessed factors inhibiting or promoting the shared use of public display space and derived design implications for providing a digital version of such public notice areas [2]. In this poster we report on the implementation of such a digital shared notice area, called Digifieds. With an initial lab study we aimed at understanding suitable means of interaction when it comes to creating, posting, and taking away content.
@InProceedings{alt2011pervasiveadj,
author = {Florian Alt AND Dominik Bial AND Thomas Kubitza AND Alireza Sahami Shirazi AND Markus Ortel AND Bjoern Zurmaar AND Firas Zaidan AND Tim Lewen AND Albrecht Schmidt},
booktitle = {{Adjunct Proceedings of the Ninth International Conference on Pervasive Computing}},
title = {{Digifieds: Evaluating Suitable Interaction Techniques for Shared Public Notice Areas}},
year = {2011},
address = {San Francisco, CA, USA},
month = {jun},
note = {alt2011pervasiveadj},
series = {Pervasive'11},
abstract = {Public notice areas are nowadays being widely used in stores, restaurants, cafes and public institutions by customers and visitors to sell or advertise products and upcoming events. Although web platforms such as Craigslist or eBay offer similar services, traditional notice areas are highly popular as using pen and paper poses only a minimal barrier to share content. With public displays proliferating the public space and with means to network these displays, novel opportunities arise as to how information can be managed and shared. In an initial step we systematically assessed factors inhibiting or promoting the shared use of public display space and derived design implications for providing a digital version of such public notice areas [2]. In this poster we report on the implementation of such a digital shared notice area, called Digifieds. With an initial lab study we aimed at understanding suitable means of interaction when it comes to creating, posting, and taking away content.},
owner = {flo},
timestamp = {2011.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2011pervasiveadj.pdf},
}
A. S. Shirazi, T. Kubitza, F. Alt, P. Tarasiewicz, A. Bungert, V. Minakov, and A. Schmidt. Mobile Context-based Ride Sharing. In Adjunct Proceedings of the Ninth International Conference on Pervasive Computing (Pervasive’11), San Francisco, CA, US, 2011.
[BibTeX] [Abstract] [PDF]
When it comes to transportation, especially in densely populated areas, people usually face a trade-off between convenience and costs. Whereas, on the one hand, convenience as a driving factor leads people to prefer cars, on the other hand air pollution, traffic jams, and high costs due to fuel prices encourage many people (e.g., commuters) to use collective transportation (CT), such as public transport systems. However, CT does not support door-to-door transportation and might be inconvenient due to limited services in off-peak hours or high costs when travelling long distances. A solution growing in popularity is ride sharing, a form of CT making alternative transportation more affordable. In this paper we present a modular platform supporting different forms of ride sharing based on context information. WEtaxi is a system which allows sharing taxis among multiple persons. WEticket supports sharing train tickets through finding additional people going on the same journey.
@InProceedings{sahami2011pervasiveadj,
author = {Alireza Sahami Shirazi AND Thomas Kubitza AND Florian Alt AND Philipp Tarasiewicz AND Andreas Bungert AND Vladimir Minakov AND Albrecht Schmidt},
booktitle = {{Adjunct Proceedings of the Ninth International Conference on Pervasive Computing}},
title = {{Mobile Context-based Ride Sharing}},
year = {2011},
address = {San Francisco, CA, US},
note = {sahami2011pervasiveadj},
series = {Pervasive'11},
abstract = {When it comes to transportation, especially in densely populated areas, people usually face a trade-off between convenience and costs. Whereas, on the one hand, convenience as a driving factor leads people to prefer cars, on the other hand air pollution, traffic jams, and high costs due to fuel prices encourage many people (e.g., commuters) to use collective transportation (CT), such as public transport systems. However, CT does not support door-to-door transportation and might be inconvenient due to limited services in off-peak hours or high costs when travelling long distances. A solution growing in popularity is ride sharing, a form of CT making alternative transportation more affordable. In this paper we present a modular platform supporting different forms of ride sharing based on context information. WEtaxi is a system which allows sharing taxis among multiple persons. WEticket supports sharing train tickets through finding additional people going on the same journey.},
owner = {flo},
timestamp = {2011.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/sahami2011pervasiveadj.pdf},
}
M. Langheinrich, N. Memarovic, I. Elhart, and F. Alt. Autopoiesic Content: A Conceptual Model for Enabling Situated Self-generative Content for Public Displays. In Proceedings of the First Workshop on Pervasive Urban Applications (PURBA’11), 2011.
[BibTeX] [Abstract] [PDF]
The significant price drops in large LCD panels have led to a massive proliferation of digital public displays in public spaces. Most of these displays, however, simply show some form of traditional advertising, such as short commercials, animated presentations, or still images. Creating content that explicitly takes the particular location and surroundings of a space into account, in order to increase its relevance for passers-by, is typically infeasible due to the high costs associated with customized content. We argue that the concept of autopoiesic content (i.e., self-generative content) could significantly increase the local relevance of such situated public displays without requiring much customization effort. As a sample application, this position paper outlines the concept and architecture of Funsquare, a large public display system that uses autopoiesic content to facilitate social interaction.
@InProceedings{langheinrich2011purba,
author = {M. Langheinrich AND N. Memarovic AND I. Elhart AND F. Alt},
booktitle = {{Proceedings of the First Workshop on Pervasive Urban Applications}},
title = {{Autopoiesic Content: A Conceptual Model for Enabling Situated Self-generative Content for Public Displays}},
year = {2011},
month = {jun},
note = {langheinrich2011purba},
series = {PURBA'11},
abstract = {The significant price drops in large LCD panels have led to a massive proliferation of digital public displays in public spaces. Most of these displays, however, simply show some form of traditional advertising, such as short commercials, animated presentations, or still images. Creating content that explicitly takes the particular location and surroundings of a space into account, in order to increase its relevance for passers-by, is typically infeasible due to the high costs associated with customized content. We argue that the concept of autopoiesic content (i.e., self-generative content) could significantly increase the local relevance of such situated public displays without requiring much customization effort. As a sample application, this position paper outlines the concept and architecture of Funsquare, a large public display system that uses autopoiesic content to facilitate social interaction.},
location = {San Francisco, US},
timestamp = {2011.05.30},
url = {http://www.florian-alt.org/unibw/wp-content/publications/langheinrich2011purba.pdf},
}
G. Beyer, F. Alt, J. Müller, A. Schmidt, K. Isakovic, S. Klose, M. Schiewe, and I. Haulsen. Audience Behavior Around Large Interactive Cylindrical Screens. In Proceedings of the 2011 Annual Conference on Human Factors in Computing Systems (CHI’11), Association for Computing Machinery, New York, NY, USA, 2011, p. 1021–1030. doi:10.1145/1978942.1979095
[BibTeX] [Abstract] [PDF]
Non-planar screens, such as columns, have been a popular means for displaying information for a long time. In contrast to traditional displays their digital counterparts are mainly flat and rectangular due to current technological constraints. However, we envision bendable displays to be available in the future, which will allow for creating new forms of displays with new properties. In this paper we explore cylindrical displays as a possible form of such novel public displays. We present a prototype and report on a user study, comparing the influence of the display shape on user behavior and user experience between flat and cylindrical displays. The results indicate that people move more in the vicinity of cylindrical displays and that there is no longer a default position when it comes to interaction. As a result, such displays are especially suitable to keep people in motion and to support gesture-like interaction.
@InProceedings{beyer2011chi,
author = {Beyer, Gilbert and Alt, Florian and M\"{u}ller, J\"{o}rg and Schmidt, Albrecht and Isakovic, Karsten and Klose, Stefan and Schiewe, Manuel and Haulsen, Ivo},
booktitle = {{Proceedings of the 2011 Annual Conference on Human Factors in Computing Systems}},
title = {{Audience Behavior Around Large Interactive Cylindrical Screens}},
year = {2011},
address = {New York, NY, USA},
month = {apr},
note = {beyer2011chi},
pages = {1021--1030},
publisher = {Association for Computing Machinery},
series = {CHI'11},
abstract = {Non-planar screens, such as columns, have been a popular means for displaying information for a long time. In contrast to traditional displays their digital counterparts are mainly flat and rectangular due to current technological constraints. However, we envision bendable displays to be available in the future, which will allow for creating new forms of displays with new properties. In this paper we explore cylindrical displays as a possible form of such novel public displays. We present a prototype and report on a user study, comparing the influence of the display shape on user behavior and user experience between flat and cylindrical displays. The results indicate that people move more in the vicinity of cylindrical displays and that there is no longer a default position when it comes to interaction. As a result, such displays are especially suitable to keep people in motion and to support gesture-like interaction.},
acmid = {1979095},
doi = {10.1145/1978942.1979095},
isbn = {978-1-4503-0228-9},
keywords = {cylindrical screens, digital columns, display formats, interactive surfaces, non-planar screens, public displays},
location = {Vancouver, BC, Canada},
numpages = {10},
timestamp = {2011.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/beyer2011chi.pdf},
}
D. Bial, D. Kern, F. Alt, and A. Schmidt. Enhancing Outdoor Navigation Systems Through Vibrotactile Feedback. In CHI ’11 Extended Abstracts on Human Factors in Computing Systems (CHI EA’11), Association for Computing Machinery, New York, NY, USA, 2011, p. 1273–1278. doi:10.1145/1979742.1979760
[BibTeX] [Abstract] [PDF]
While driving, many tasks compete for the attention of the user, mainly via the audio and visual channel. When designing systems depending upon providing feedback to users (e.g., navigation systems), it is a crucial prerequisite to minimize influence on and distraction from the driving task. This becomes even more important when designing systems for the use on motorbikes: space for output devices is scarce; as people wear helmets, visual feedback is often difficult due to lighting conditions; and audio feedback is limited. In a first step we aimed at creating an understanding as to how information could be communicated in a meaningful way using vibrotactile signals. Therefore, we investigated suitable positions of actuators on the hand, appropriate length of the vibration stimulus, and different vibration patterns. We built a first prototype with 4 vibration actuators attached to the fingertips and asked 4 participants to test our prototype while driving. With this work we envision to lay the foundations for vibrotactile support in navigation systems.
@InProceedings{bial2011chiea,
author = {Bial, Dominik and Kern, Dagmar and Alt, Florian and Schmidt, Albrecht},
booktitle = {{CHI '11 Extended Abstracts on Human Factors in Computing Systems}},
title = {{Enhancing Outdoor Navigation Systems Through Vibrotactile Feedback}},
year = {2011},
address = {New York, NY, USA},
month = {apr},
note = {bial2011chiea},
pages = {1273--1278},
publisher = {Association for Computing Machinery},
series = {CHI EA'11},
abstract = {While driving, many tasks compete for the attention of the user, mainly via the audio and visual channel. When designing systems depending upon providing feedback to users (e.g., navigation systems), it is a crucial prerequisite to minimize influence on and distraction from the driving task. This becomes even more important when designing systems for the use on motorbikes: space for output devices is scarce; as people wear helmets, visual feedback is often difficult due to lighting conditions; and audio feedback is limited. In a first step we aimed at creating an understanding as to how information could be communicated in a meaningful way using vibrotactile signals. Therefore, we investigated suitable positions of actuators on the hand, appropriate length of the vibration stimulus, and different vibration patterns. We built a first prototype with 4 vibration actuators attached to the fingertips and asked 4 participants to test our prototype while driving. With this work we envision to lay the foundations for vibrotactile support in navigation systems.},
acmid = {1979760},
doi = {10.1145/1979742.1979760},
isbn = {978-1-4503-0268-5},
keywords = {field study, motorcycling, vibration patterns, vibro tactile navigation},
location = {Vancouver, BC, Canada},
numpages = {6},
timestamp = {2011.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/bial2011chiea.pdf},
}
G. Beyer, F. Alt, and J. Müller. On the Impact of Non-flat Screens on the Interaction with Public Displays. In Proceedings of the CHI Workshop on Large Displays in Urban Life, Vancouver, BC, Canada, 2011.
[BibTeX] [Abstract] [PDF]
With decreasing prices for display technologies and bendable displays becoming commercially available, novel forms of public displays in arbitrary shapes emerge. However, different shapes impact on how users behave in the vicinity of such displays and how they interact with them. With our research we take a first step towards exploring these novel displays. We present findings from an initial study with cylindrical displays and discuss to what extent findings can be generalized towards other forms of public displays.
@InProceedings{beyer2011ldul,
author = {Gilbert Beyer AND Florian Alt AND J\"{o}rg M\"{u}ller},
booktitle = {{Proceedings of the CHI Workshop on Large Displays in Urban Life}},
title = {{On the Impact of Non-flat Screens on the Interaction with Public Displays}},
year = {2011},
address = {Vancouver, BC, Canada},
month = {apr},
note = {beyer2011ldul},
abstract = {With decreasing prices for display technologies and bendable displays becoming commercially available, novel forms of public displays in arbitrary shapes emerge. However, different shapes impact on how users behave in the vicinity of such displays and how they interact with them. With our research we take a first step towards exploring these novel displays. We present findings from an initial study with cylindrical displays and discuss to what extent findings can be generalized towards other forms of public displays.},
owner = {flo},
timestamp = {2011.04.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/beyer2011ldul.pdf},
}

2010

F. Alt, D. Kern, F. Schulte, B. Pfleging, A. S. Shirazi, and A. Schmidt. Enabling micro-entertainment in vehicles based on context information. In Proceedings of the 2nd International Conference on Automotive User Interfaces and Interactive Vehicular Applications (AutomotiveUI ’10), Association for Computing Machinery, New York, NY, USA, 2010, p. 117–124. doi:10.1145/1969773.1969794
[BibTeX] [Abstract] [PDF]
People spend a significant amount of time in their cars (US: 86 minutes/day, Europe: 43 minutes/day) while commuting, shopping, or traveling. Hence, the variety of entertainment in the car increases, and many vehicles are already equipped with displays, allowing for watching news, videos, accessing the Internet, or playing games. At the same time, the urbanization caused a massive increase of traffic volume, which led to people spending an ever-increasing amount of their time in front of red traffic lights. An observation of the prevailing forms of entertainment in the car reveals that content such as text, videos, or games are often a mere adaptation of content produced for television, public displays, PCs, or mobile phones and do not adapt to the situation in the car. In this paper we report on a web survey assessing which forms of entertainment and which types of content are considered to be useful for in-car entertainment by drivers. We then introduce an algorithm, which is capable of learning standing times in front of traffic lights based on GPS information only. This, on one hand, allows for providing content of appropriate length, on the other hand, for directing the attention of the driver back towards the street at the right time. Finally, we present a prototype implementation and a qualitative evaluation.
@InProceedings{alt2010autoui,
author = {Alt, Florian and Kern, Dagmar and Schulte, Fabian and Pfleging, Bastian and Shirazi, Alireza Sahami and Schmidt, Albrecht},
booktitle = {{Proceedings of the 2nd International Conference on Automotive User Interfaces and Interactive Vehicular Applications}},
title = {Enabling Micro-entertainment in Vehicles Based on Context Information},
year = {2010},
address = {New York, NY, USA},
note = {alt2010autoui},
pages = {117--124},
publisher = {Association for Computing Machinery},
series = {AutomotiveUI '10},
abstract = {People spend a significant amount of time in their cars (US: 86 minutes/day, Europe: 43 minutes/day) while commuting, shopping, or traveling. Hence, the variety of entertainment in the car increases, and many vehicles are already equipped with displays, allowing for watching news, videos, accessing the Internet, or playing games. At the same time, the urbanization caused a massive increase of traffic volume, which led to people spending an ever-increasing amount of their time in front of red traffic lights. An observation of the prevailing forms of entertainment in the car reveals that content such as text, videos, or games are often a mere adaptation of content produced for television, public displays, PCs, or mobile phones and do not adapt to the situation in the car. In this paper we report on a web survey assessing which forms of entertainment and which types of content are considered to be useful for in-car entertainment by drivers. We then introduce an algorithm, which is capable of learning standing times in front of traffic lights based on GPS information only. This, on one hand, allows for providing content of appropriate length, on the other hand, for directing the attention of the driver back towards the street at the right time. Finally, we present a prototype implementation and a qualitative evaluation.},
acmid = {1969794},
doi = {10.1145/1969773.1969794},
isbn = {978-1-4503-0437-5},
keywords = {GPS, context, micro entertainment, vehicle},
location = {Pittsburgh, Pennsylvania},
numpages = {8},
timestamp = {2010.11.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2010autoui.pdf},
}
F. Alt, A. S. Shirazi, A. Schmidt, U. Kramer, and Z. Nawaz. Location-based Crowdsourcing: Extending Crowdsourcing to the Real World. In Proceedings of the Sixth Nordic Conference on Human-Computer Interaction: Extending Boundaries (NordiCHI ’10), Association for Computing Machinery, New York, NY, USA, 2010, p. 13–22. doi:10.1145/1868914.1868921
[BibTeX] [Abstract] [PDF]
The WWW and the mobile phone have become an essential means for sharing implicitly and explicitly generated information and a communication platform for many people. With the increasing ubiquity of location sensing included in mobile devices we investigate the arising opportunities for mobile crowdsourcing making use of the real world context. In this paper we assess how the idea of user-generated content, web-based crowdsourcing, and mobile electronic coordination can be combined to extend crowdsourcing beyond the digital domain and link it to tasks in the real world. To explore our concept we implemented a crowdsourcing platform that integrates location as a parameter for distributing tasks to workers. In the paper we describe the concept and design of the platform and discuss the results of two user studies. Overall the findings show that integrating tasks in the physical world is useful and feasible. We observed that (1) mobile workers prefer to pull tasks rather than getting them pushed, (2) requests for pictures were the most favored tasks, and (3) users tended to solve tasks mainly in close proximity to their homes. Based on this, we discuss issues that should be considered during designing mobile crowdsourcing applications.
@InProceedings{alt2010nordichi,
author = {Alt, Florian and Shirazi, Alireza Sahami and Schmidt, Albrecht and Kramer, Urs and Nawaz, Zahid},
booktitle = {{Proceedings of the Sixth Nordic Conference on Human-Computer Interaction: Extending Boundaries}},
title = {{Location-based Crowdsourcing: Extending Crowdsourcing to the Real World}},
year = {2010},
address = {New York, NY, USA},
month = {oct},
note = {alt2010nordichi},
pages = {13--22},
publisher = {Association for Computing Machinery},
series = {NordiCHI '10},
abstract = {The WWW and the mobile phone have become an essential means for sharing implicitly and explicitly generated information and a communication platform for many people. With the increasing ubiquity of location sensing included in mobile devices we investigate the arising opportunities for mobile crowdsourcing making use of the real world context. In this paper we assess how the idea of user-generated content, web-based crowdsourcing, and mobile electronic coordination can be combined to extend crowdsourcing beyond the digital domain and link it to tasks in the real world. To explore our concept we implemented a crowdsourcing platform that integrates location as a parameter for distributing tasks to workers. In the paper we describe the concept and design of the platform and discuss the results of two user studies. Overall the findings show that integrating tasks in the physical world is useful and feasible. We observed that (1) mobile workers prefer to pull tasks rather than getting them pushed, (2) requests for pictures were the most favored tasks, and (3) users tended to solve tasks mainly in close proximity to their homes. Based on this, we discuss issues that should be considered during designing mobile crowdsourcing applications.},
acmid = {1868921},
doi = {10.1145/1868914.1868921},
isbn = {978-1-60558-934-3},
keywords = {context, crowdsourcing, location, mobile phone},
location = {Reykjavik, Iceland},
numpages = {10},
timestamp = {2010.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2010nordichi.pdf},
}
J. Müller, F. Alt, D. Michelis, and A. Schmidt. Requirements and Design Space for Interactive Public Displays. In Proceedings of the International Conference on Multimedia (MM’10), Association for Computing Machinery, New York, NY, USA, 2010, p. 1285–1294. doi:10.1145/1873951.1874203
[BibTeX] [Abstract] [PDF]
Digital immersion is moving into public space. Interactive screens and public displays are deployed in urban environments, malls, and shop windows. Inner city areas, airports, train stations and stadiums are experiencing a transformation from traditional to digital displays enabling new forms of multimedia presentation and new user experiences. Imagine a walkway with digital displays that allows a user to immerse herself in her favorite content while moving through public space. In this paper we discuss the fundamentals for creating exciting public displays and multimedia experiences enabling new forms of engagement with digital content. Interaction in public space and with public displays can be categorized in phases, each having specific requirements. Attracting, engaging and motivating the user are central design issues that are addressed in this paper. We provide a comprehensive analysis of the design space explaining mental models and interaction modalities, and from this analysis we derive a taxonomy for interactive public displays. Our analysis and the taxonomy are grounded in a large number of research projects, art installations and experience. With our contribution we aim at providing a comprehensive guide for designers and developers of interactive multimedia on public displays.
@InProceedings{mueller2010mm,
author = {M\"{u}ller, J\"{o}rg and Alt, Florian and Michelis, Daniel and Schmidt, Albrecht},
booktitle = {{Proceedings of the International Conference on Multimedia}},
title = {{Requirements and Design Space for Interactive Public Displays}},
year = {2010},
address = {New York, NY, USA},
note = {mueller2010mm},
pages = {1285--1294},
publisher = {Association for Computing Machinery},
series = {MM'10},
abstract = {Digital immersion is moving into public space. Interactive screens and public displays are deployed in urban environments, malls, and shop windows. Inner city areas, airports, train stations and stadiums are experiencing a transformation from traditional to digital displays enabling new forms of multimedia presentation and new user experiences. Imagine a walkway with digital displays that allows a user to immerse herself in her favorite content while moving through public space. In this paper we discuss the fundamentals for creating exciting public displays and multimedia experiences enabling new forms of engagement with digital content. Interaction in public space and with public displays can be categorized in phases, each having specific requirements. Attracting, engaging and motivating the user are central design issues that are addressed in this paper. We provide a comprehensive analysis of the design space explaining mental models and interaction modalities, and from this analysis we derive a taxonomy for interactive public displays. Our analysis and the taxonomy are grounded in a large number of research projects, art installations and experience. With our contribution we aim at providing a comprehensive guide for designers and developers of interactive multimedia on public displays.},
acmid = {1874203},
doi = {10.1145/1873951.1874203},
isbn = {978-1-60558-933-6},
keywords = {design space, interaction, public displays, requirements},
location = {Firenze, Italy},
numpages = {10},
timestamp = {2010.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mueller2010mm.pdf},
}
A. Müller, A. S. Shirazi, F. Alt, and A. Schmidt. ZoneTrak: Design and Implementation of an Emergency Management Assistance System. In Adjunct Proceedings of the Eighth International Conference on Pervasive Computing (Pervasive’10), Helsinki, Finland, 2010.
[BibTeX] [Abstract] [PDF]
Though pervasive computing technologies are omnipresent in our daily lives, emergency cases, such as earthquakes or fires, often cause serious damage to the underlying infrastructure. In such cases rescue units rely on paper maps of the operation areas, and important information is broadcast either from a central unit or from other teams. This information is manually updated on the paper map, not only causing a lot of work but also being a potential source of errors. In this research we implemented a system that provides a positioning system to track forces and allows sharing information in real time. Rescue units can annotate different zones and broadcast data to other units, whose maps are automatically updated with available annotations. We show how such a system can be operated based on an independent infrastructure, which makes it robust and reliable in emergency and catastrophe situations.
@InProceedings{mueller2010pevasiveadj,
author = {Alexander M\"{u}ller AND Alireza Sahami Shirazi AND Florian Alt AND Albrecht Schmidt},
booktitle = {{Adjunct Proceedings of the Eighth International Conference on Pervasive Computing}},
title = {{ZoneTrak: Design and Implementation of an Emergency Management Assistance System}},
year = {2010},
address = {Helsinki, Finland},
note = {mueller2010pevasiveadj},
series = {Pervasive'10},
abstract = {Though pervasive computing technologies are omnipresent in our daily lives, emergency cases, such as earthquakes or fires, often cause serious damage to the underlying infrastructure. In such cases rescue units rely on paper maps of the operation areas, and important information is broadcast either from a central unit or from other teams. This information is manually updated on the paper map, not only causing a lot of work but also being a potential source of errors. In this research we implemented a system that provides a positioning system to track forces and allows sharing information in real time. Rescue units can annotate different zones and broadcast data to other units, whose maps are automatically updated with available annotations. We show how such a system can be operated based on an independent infrastructure, which makes it robust and reliable in emergency and catastrophe situations.},
owner = {flo},
timestamp = {2010.06.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mueller2010pevasiveadj.pdf},
}
G. Beyer, F. Alt, S. Klose, K. Isakovic, A. S. Shirazi, and A. Schmidt. Design Space for Large Cylindrical Screens. In Proceedings of the Third International Workshop on Pervasive Advertising and Shopping (PerAd’10), Helsinki, Finland, 2010.
[BibTeX] [Abstract] [PDF]
The era of modern cylindrical screens, so-called advertising columns, began in the middle of the 19th century. Even nowadays they are still a popular advertising medium, which integrates well with urban environments. With advances in display technologies (LEDs, projectors) digital forms of such columns emerge and enable novel forms of visualization and interaction, which significantly differ from flat, rectangular screens due to the round shape. In this paper we present the design space for large cylindrical screens and outline design principles based on observations and experiments with a prototype of a digital column. We especially focus on the differences with flat, rectangular displays and report on challenges related to the deployment and development of applications for cylindrical screens.
@InProceedings{beyer2010perad,
author = {Gilbert Beyer AND Florian Alt AND Stefan Klose AND Karsten Isakovic AND Alireza Sahami Shirazi AND Albrecht Schmidt},
booktitle = {{Proceedings of the Third International Workshop on Pervasive Advertising and Shopping}},
title = {{Design Space for Large Cylindrical Screens}},
year = {2010},
address = {Helsinki, Finland},
month = {jun},
note = {beyer2010perad},
series = {PerAd'10},
abstract = {The era of modern cylindrical screens, so-called advertising columns, began in the middle of the 19th century. Even nowadays they are still a popular advertising medium, which integrates well with urban environments. With advances in display technologies (LEDs, projectors) digital forms of such columns emerge and enable novel forms of visualization and interaction, which significantly differ from flat, rectangular screens due to the round shape. In this paper we present the design space for large cylindrical screens and outline design principles based on observations and experiments with a prototype of a digital column. We especially focus on the differences with flat, rectangular displays and report on challenges related to the deployment and development of applications for cylindrical screens.},
owner = {flo},
timestamp = {2010.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/beyer2010perad.pdf},
}
F. Alt, A. S. Shirazi, S. Legien, A. Schmidt, and J. Mennenöh. Creating Meaningful Melodies from Text Messages. In Proceedings of the 2010 Conference on New Interfaces for Musical Expression (NIME’10), 2010, p. 63–68.
[BibTeX] [Abstract] [PDF]
Writing text messages (e.g. email, SMS, instant messaging) is a popular form of synchronous and asynchronous communication. However, when it comes to notifying users about new messages, current audio-based approaches, such as notification tones, are very limited in conveying information. In this paper we show how entire text messages can be encoded into a meaningful and euphonic melody in such a way that users can guess a message’s intention without actually seeing the content. First, as a proof of concept, we report on the findings of an initial online survey among 37 musicians and 32 non-musicians evaluating the feasibility and validity of our approach. We show that our representation is understandable and that there are no significant differences between musicians and non-musicians. Second, we evaluated the approach in a real world scenario based on a Skype plug-in. In a field study with 14 participants we showed that sonified text messages strongly impact on the users’ message checking behavior by significantly reducing the time between receiving and reading an incoming message.
@InProceedings{alt2010nime,
author = {Alt, F. and Shirazi, A.S. and Legien, S. and Schmidt, A. and Mennen{\"o}h, J.},
booktitle = {{Proceedings of the 2010 Conference on New Interfaces for Musical Expression}},
title = {{Creating Meaningful Melodies from Text Messages}},
year = {2010},
month = {jun},
note = {alt2010nime},
pages = {63--68},
series = {NIME'10},
abstract = {Writing text messages (e.g. email, SMS, instant messaging) is a popular form of synchronous and asynchronous communication. However, when it comes to notifying users about new messages, current audio-based approaches, such as notification tones, are very limited in conveying information. In this paper we show how entire text messages can be encoded into a meaningful and euphonic melody in such a way that users can guess a message’s intention without actually seeing the content. First, as a proof of concept, we report on the findings of an initial online survey among 37 musicians and 32 non-musicians evaluating the feasibility and validity of our approach. We show that our representation is understandable and that there are no significant differences between musicians and non-musicians. Second, we evaluated the approach in a real world scenario based on a Skype plug-in. In a field study with 14 participants we showed that sonified text messages strongly impact on the users’ message checking behavior by significantly reducing the time between receiving and reading an incoming message.},
timestamp = {2010.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2010nime.pdf},
}
J. D. H. Ramos, A. Tabard, and F. Alt. Contextual-Analysis for Infrastructure Awareness Systems. In Proceedings of the CHI Workshop “Bridging the Gap: Moving from Contextual Analysis to Design”, Atlanta, GA, USA, 2010.
[BibTeX] [PDF]
@InProceedings{ramos2010chiws,
author = {Juan David Hincapie Ramos AND Aurelien Tabard AND Florian Alt},
booktitle = {{Proceedings of the CHI Workshop ``Bridging the Gap: Moving from Contextual Analysis to Design''}},
title = {{Contextual-Analysis for Infrastructure Awareness Systems}},
year = {2010},
address = {Atlanta, GA, USA},
note = {ramos2010chiws},
owner = {flo},
timestamp = {2010.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/},
}
I. Reif, F. Alt, J. D. Hincapié Ramos, K. Poteriaykina, and J. Wagner. Cleanly: Trashducation Urban System. In CHI ’10 Extended Abstracts on Human Factors in Computing Systems (CHI EA’10), Association for Computing Machinery, New York, NY, USA, 2010, p. 3511–3516. doi:10.1145/1753846.1754010
[BibTeX] [Abstract] [PDF]
Half the world’s population is expected to live in urban areas by 2020. The high human density and changes in peoples’ consumption habits result in an ever-increasing amount of trash that must be handled by governing bodies. Problems created by inefficient or dysfunctional cleaning services are exacerbated by a poor personal trash management culture. In this paper we present Cleanly, an urban trashducation system aimed at creating awareness of garbage production and management, which may serve as an educational platform in the urban environment. We report on data collected from an online survey, which not only motivates our research but also provides useful information on reasons and possible solutions for trash problems.
@InProceedings{reif2010chiea,
author = {Reif, Inbal and Alt, Florian and Hincapi{\'e} Ramos, Juan David and Poteriaykina, Katerina and Wagner, Johannes},
booktitle = {{CHI '10 Extended Abstracts on Human Factors in Computing Systems}},
title = {{Cleanly: Trashducation Urban System}},
year = {2010},
address = {New York, NY, USA},
note = {reif2010chiea},
pages = {3511--3516},
publisher = {Association for Computing Machinery},
series = {CHI EA'10},
abstract = {Half the world's population is expected to live in urban areas by 2020. The high human density and changes in peoples' consumption habits result in an ever-increasing amount of trash that must be handled by governing bodies. Problems created by inefficient or dysfunctional cleaning services are exacerbated by a poor personal trash management culture. In this paper we present Cleanly, an urban trashducation system aimed at creating awareness of garbage production and management, which may serve as an educational platform in the urban environment. We report on data collected from an online survey, which not only motivates our research but also provides useful information on reasons and possible solutions for trash problems.},
acmid = {1754010},
doi = {10.1145/1753846.1754010},
isbn = {978-1-60558-930-5},
keywords = {design, interaction, public displays, recycling, rfid badges, trashducation, ubiquitous display environments},
location = {Atlanta, Georgia, USA},
numpages = {6},
timestamp = {2010.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/reif2010chiea.pdf},
}
J. Mennenöh, S. Kristes, F. Alt, A. S. Shirazi, A. Schmidt, and H. Schröder. Customer Touchpoints im stationären Einzelhandel – Potenzial von Pervasive Computing. Marketing Review St. Gallen, vol. 27, iss. 2, p. 37–42, 2010.
[BibTeX] [Abstract] [PDF]
The more individualized services customers demand, the more information providers need. While distance-selling retailers have personalized data about their customers and, above all in online shops, movement data, brick-and-mortar retail still has considerable data gaps. These gaps can be closed with a pervasive computing environment. New customer touchpoints provide information about who shops at a store and how the purchase is carried out.
@Article{mennenoeh2010mrsg,
author = {Mennen\"{o}h, Julian and Kristes, Stefanie and Alt, Florian and Shirazi, Alireza Sahami and Schmidt, Albrecht and Schr\"{o}der, Hendrik},
journal = {{Marketing Review St. Gallen}},
title = {{Customer Touchpoints im station\"{a}ren Einzelhandel -- Potenzial von Pervasive Computing}},
year = {2010},
note = {mennenoeh2010mrsg},
number = {2},
pages = {37--42},
volume = {27},
abstract = {The more individualized services customers demand, the more information providers need. While distance-selling retailers have personalized data about their customers and, above all in online shops, movement data, brick-and-mortar retail still has considerable data gaps. These gaps can be closed with a pervasive computing environment. New customer touchpoints provide information about who shops at a store and how the purchase is carried out.},
publisher = {Springer},
timestamp = {2010.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mennenoeh2010mrsg.pdf},
}
F. Alt, A. S. Shirazi, A. Kaiser, K. Pfeuffer, E. Gurkan, A. Schmidt, P. Holleis, and M. Wagner. Exploring Ambient Visualizations of Context Information. In Adjunct Proceedings of the Eighth Annual IEEE International Conference on Pervasive Computing and Communications (PerCom’10), IEEE, Mannheim, Germany, 2010, p. 788–791.
[BibTeX] [Abstract] [PDF]
In this paper we investigate how ambient displays can be used to share context information. Currently, many personal devices provide context information, such as location or activity, and at the same time the number of ambient displays is increasing. We developed two prototypes for visualizing contextual information and initially explored the suitability of these in an online study. Additionally, we investigated which parameters are important for users when sharing personal context. Based on our findings we discuss guidelines for the design of ambient displays for context sharing.
@InProceedings{alt2010percomadj,
author = {Florian Alt and Alireza Sahami Shirazi and Andreas Kaiser and Ken Pfeuffer and Emre Gurkan and Albrecht Schmidt and Paul Holleis and Matthias Wagner},
booktitle = {{Adjunct Proceedings of the Eighth Annual IEEE International Conference on Pervasive Computing and Communications}},
title = {{Exploring Ambient Visualizations of Context Information}},
year = {2010},
address = {Mannheim, Germany},
month = {apr},
note = {alt2010percomadj},
pages = {788-791},
publisher = {IEEE},
series = {PerCom'10},
abstract = {In this paper we investigate how ambient displays can be used to share context information. Currently, many personal devices provide context information, such as location or activity, and at the same time the number of ambient displays is increasing. We developed two prototypes for visualizing contextual information and initially explored the suitability of these in an online study. Additionally, we investigated which parameters are important for users when sharing personal context. Based on our findings we discuss guidelines for the design of ambient displays for context sharing.},
bibsource = {DBLP, http://dblp.uni-trier.de},
ee = {http://dx.doi.org/10.1109/PERCOMW.2010.5470542},
timestamp = {2010.04.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2010percomadj.pdf},
}
A. S. Shirazi, T. Kubitza, F. Alt, B. Pfleging, and A. Schmidt. WEtransport: A Context-based Ride Sharing Platform. In Adjunct Proceedings of the Twelfth International Conference on Ubiquitous Computing (Ubicomp’10), Copenhagen, Denmark, 2010.
[BibTeX] [Abstract] [PDF]
In densely populated urban areas high amounts of traffic pose a major problem, which affects the environment, economy, and our lives. From a user’s perspective, the main issues include delays due to traffic jams, lack of parking space, and high costs due to increasing fuel prices (e.g., if commuting long distances). Collective transportation (CT), e.g., public transport systems, provides a partial solution to these issues. Yet, CT does not support door-to-door transportation, hence reducing convenience; it might be limited in off-peak hours, and it is still a cost factor when travelling long distances. A solution to these issues is ride sharing, an evolving form of CT making alternative transportation more affordable. In this paper we present a modular, context-aware ride sharing platform. We aim at enhancing convenience, reliability, and affordability of different forms of ride sharing by means of context data. Additionally, our approach supports an easy server- and client-side expansion due to the modular platform structure.
@InProceedings{sahami2010ubicompadj,
author = {Alireza Sahami Shirazi AND Thomas Kubitza AND Florian Alt AND Bastian Pfleging AND Albrecht Schmidt},
booktitle = {{Adjunct Proceedings of the Twelfth International Conference on Ubiquitous Computing}},
title = {{WEtransport: A Context-based Ride Sharing Platform}},
year = {2010},
address = {Copenhagen, Denmark},
note = {sahami2010ubicompadj},
series = {Ubicomp'10},
abstract = {In densely populated urban areas high amounts of traffic pose a major problem, which affects the environment, economy, and our lives. From a user’s perspective, the main issues include delays due to traffic jams, lack of parking space, and high costs due to increasing fuel prices (e.g., if commuting long distances). Collective transportation (CT), e.g., public transport systems, provides a partial solution to these issues. Yet, CT does not support door-to-door transportation, hence reducing convenience; it might be limited in off-peak hours, and it is still a cost factor when travelling long distances. A solution to these issues is ride sharing, an evolving form of CT making alternative transportation more affordable. In this paper we present a modular, context-aware ride sharing platform. We aim at enhancing convenience, reliability, and affordability of different forms of ride sharing by means of context data. Additionally, our approach supports an easy server- and client-side expansion due to the modular platform structure.},
owner = {flo},
timestamp = {2010.03.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/sahami2010ubicompadj.pdf},
}
A. Sahami Shirazi, A. Sarjanoja, F. Alt, A. Schmidt, and J. Häkkilä. Understanding the Impact of Abstracted Audio Preview of SMS. In Proceedings of the 28th International Conference on Human Factors in Computing Systems (CHI’10), Association for Computing Machinery, New York, NY, USA, 2010, p. 1735–1738. doi:10.1145/1753326.1753585
[BibTeX] [Abstract] [PDF]
Despite the availability of other mobile messaging applications, SMS has kept its position as a heavily used communication technology. However, there are many situations in which it is inconvenient or inappropriate to check a message’s content immediately. In this paper, we introduce the concept of audio previews of SMS. Based on a real-time analysis of the content of a message, we provide auditory cues in addition to the notification tone upon receiving an SMS. We report on a field trial with 20 participants and show that the use of audio-enhanced SMS affects the reading and writing behavior of users. Our work is motivated by the results of an online survey among 347 SMS users, from whom we analyzed 3400 text messages.
@InProceedings{sahami2010chi,
author = {Sahami Shirazi, Alireza and Sarjanoja, Ari-Heikki and Alt, Florian and Schmidt, Albrecht and H\"{a}kkil\"{a}, Jonna},
booktitle = {{Proceedings of the 28th International Conference on Human Factors in Computing Systems}},
title = {{Understanding the Impact of Abstracted Audio Preview of SMS}},
year = {2010},
address = {New York, NY, USA},
note = {sahami2010chi},
pages = {1735--1738},
publisher = {Association for Computing Machinery},
series = {CHI'10},
abstract = {Despite the availability of other mobile messaging applications, SMS has kept its position as a heavily used communication technology. However, there are many situations in which it is inconvenient or inappropriate to check a message's content immediately. In this paper, we introduce the concept of audio previews of SMS. Based on a real-time analysis of the content of a message, we provide auditory cues in addition to the notification tone upon receiving an SMS. We report on a field trial with 20 participants and show that the use of audio-enhanced SMS affects the reading and writing behavior of users. Our work is motivated by the results of an online survey among 347 SMS users, from whom we analyzed 3400 text messages.},
acmid = {1753585},
doi = {10.1145/1753326.1753585},
isbn = {978-1-60558-929-9},
keywords = {auditory ui, emoticon, mobile phone, sms, user studies},
location = {Atlanta, Georgia, USA},
numpages = {4},
timestamp = {2010.01.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/sahami2010chi.pdf},
}

2009

F. Alt, M. Balz, S. Kristes, A. S. Shirazi, J. Mennenöh, A. Schmidt, H. Schröder, and M. Gödicke. Adaptive User Profiles in Pervasive Advertising Environments. In Proceedings of the European Conference on Ambient Intelligence (AmI’09), Springer-Verlag, Berlin, Heidelberg, 2009, p. 276–286. doi:10.1007/978-3-642-05408-2_32
[BibTeX] [Abstract] [PDF]
Nowadays modern advertising environments try to provide more efficient ads by targeting customers based on their interests. Various approaches exist today as to how information about the users’ interests can be gathered. Users can deliberately and explicitly provide this information, or users’ shopping behaviors can be analyzed implicitly. We implemented an advertising platform to simulate an advertising environment and present adaptive profiles, which let users set up profiles based on a self-assessment, and enhance those profiles with information about their real shopping behavior as well as about their activity intensity. Additionally, we explain how pervasive technologies such as Bluetooth can be used to create a profile anonymously and unobtrusively.
@InProceedings{alt2009ami,
author = {Alt, Florian and Balz, Moritz and Kristes, Stefanie and Shirazi, Alireza Sahami and Mennen\"{o}h, Julian and Schmidt, Albrecht and Schr\"{o}der, Hendrik and G\"{o}dicke, Michael},
booktitle = {{Proceedings of the European Conference on Ambient Intelligence}},
title = {{Adaptive User Profiles in Pervasive Advertising Environments}},
year = {2009},
address = {Berlin, Heidelberg},
month = {nov},
note = {alt2009ami},
pages = {276--286},
publisher = {Springer-Verlag},
series = {AmI'09},
abstract = {Nowadays modern advertising environments try to provide more efficient ads by targeting customers based on their interests. Various approaches exist today as to how information about the users’ interests can be gathered. Users can deliberately and explicitly provide this information, or users’ shopping behaviors can be analyzed implicitly. We implemented an advertising platform to simulate an advertising environment and present adaptive profiles, which let users set up profiles based on a self-assessment, and enhance those profiles with information about their real shopping behavior as well as about their activity intensity. Additionally, we explain how pervasive technologies such as Bluetooth can be used to create a profile anonymously and unobtrusively.},
acmid = {1694666},
doi = {10.1007/978-3-642-05408-2_32},
isbn = {978-3-642-05407-5},
location = {Salzburg, Austria},
numpages = {11},
timestamp = {2009.11.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2009ami.pdf},
}
F. Alt, A. S. Shirazi, M. Pfeiffer, P. Holleis, and A. Schmidt. TaxiMedia: An Interactive Context-Aware Entertainment and Advertising System. In Proceedings of the Second International Workshop on Pervasive Advertising (PerAd’09), Lübeck, Germany, 2009.
[BibTeX] [Abstract] [PDF]
The use of public transport vehicles, such as trams, buses, and taxis, as an advertising space has been increasing for several years. However, mainly the outside of the vehicles is used to show advertisements using paintings, foil, or roof-mounted displays. Nowadays, with advances in display technologies, small high-resolution displays can be easily embedded in vehicles and be used for entertainment or advertising purposes. In this paper we introduce an interactive context-aware advertising system designed for cabs, which is targeted to offer context-aware information such as advertisements, points of interest, events, etc. during a cab ride. Additionally, it is possible for advertisers to upload their contents and define areas where their advertisements should be shown.
@InProceedings{alt2009perad2,
author = {Florian Alt AND Alireza Sahami Shirazi AND Max Pfeiffer AND Paul Holleis AND Albrecht Schmidt},
booktitle = {{Proceedings of the Second International Workshop on Pervasive Advertising}},
title = {{TaxiMedia: An Interactive Context-Aware Entertainment and Advertising System}},
year = {2009},
address = {L\"{u}beck, Germany},
month = {oct},
note = {alt2009perad2},
series = {PerAd'09},
abstract = {The use of public transport vehicles, such as trams, buses, and taxis, as an advertising space has been increasing for several years. However, mainly the outside of the vehicles is used to show advertisements, using paintings, foil, or roof-mounted displays. Nowadays, with advances in display technologies, small high-resolution displays can easily be embedded in vehicles and be used for entertainment or advertising purposes. In this paper we introduce an interactive context-aware advertising system designed for cabs, which is targeted to offer context-aware information such as advertisements, points of interest, and events during a cab ride. Additionally, it is possible for advertisers to upload their content and define areas where their advertisements should be shown.},
owner = {flo},
timestamp = {2009.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2009perad2.pdf},
}
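The area-based ad placement described in the abstract boils down to a geofencing check. A minimal sketch, assuming circular advertiser-defined zones (the data model is illustrative, not the paper's implementation):

from dataclasses import dataclass
from math import radians, sin, cos, asin, sqrt

@dataclass
class AdZone:
    ad_id: str
    lat: float
    lon: float
    radius_km: float

def haversine_km(lat1, lon1, lat2, lon2) -> float:
    # Great-circle distance between two points in kilometres.
    dlat, dlon = radians(lat2 - lat1), radians(lon2 - lon1)
    a = sin(dlat / 2) ** 2 + cos(radians(lat1)) * cos(radians(lat2)) * sin(dlon / 2) ** 2
    return 2 * 6371.0 * asin(sqrt(a))

def ads_for_position(lat: float, lon: float, zones: list[AdZone]) -> list[str]:
    # Show an ad only while the cab is inside one of its advertiser-defined zones.
    return [z.ad_id for z in zones if haversine_km(lat, lon, z.lat, z.lon) <= z.radius_km]

zones = [AdZone("cafe-promo", 53.8655, 10.6866, 0.5)]  # an example zone in Lübeck
print(ads_for_position(53.8660, 10.6870, zones))       # -> ['cafe-promo']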
A. S. Shirazi, F. Alt, A. Schmidt, A. Sarjanoja, L. Hynninen, J. Häkkilä, and P. Holleis. Emotion Sharing Via Self-Composed Melodies on Mobile Phones. In Proceedings of the 11th International Conference on Human-Computer Interaction with Mobile Devices and Services (MobileHCI’09), Association for Computing Machinery, New York, NY, USA, 2009, p. 301–304. doi:10.1145/1613858.1613897
[BibTeX] [Abstract] [PDF]
In their role as personal communication devices, mobile phones are a natural choice for sharing and communicating emotions. However, their functionalities are currently very limited in power to express affective messages. In this paper, we describe the design of a system that allows users to easily compose melodies and share them via mobile phones. We show that by using these melodies information about the current emotional state of the sender can be expressed and recognized synchronously by the receiver in a simple, quick, and unobtrusive way. Further, we reveal that self-composed melodies have a stronger impact than pre-composed or downloaded messages, similar to crafted pieces of art offered to a beloved person. We then present findings from a user study that assesses the implementation of a functional prototype and the adequacy of the system for emotional communication.
@InProceedings{sahami2009mobilehci,
author = {Shirazi, Alireza Sahami and Alt, Florian and Schmidt, Albrecht and Sarjanoja, Ari-Heikki and Hynninen, Lotta and H\"{a}kkil\"{a}, Jonna and Holleis, Paul},
booktitle = {{Proceedings of the 11th International Conference on Human-Computer Interaction with Mobile Devices and Services}},
title = {{Emotion Sharing Via Self-Composed Melodies on Mobile Phones}},
year = {2009},
address = {New York, NY, USA},
note = {sahami2009mobilehci},
pages = {301--304},
publisher = {Association for Computing Machinery},
series = {MobileHCI'09},
abstract = {In their role as personal communication devices, mobile phones are a natural choice for sharing and communicating emotions. However, their functionalities are currently very limited in power to express affective messages. In this paper, we describe the design of a system that allows users to easily compose melodies and share them via mobile phones. We show that by using these melodies information about the current emotional state of the sender can be expressed and recognized synchronously by the receiver in a simple, quick, and unobtrusive way. Further, we reveal that self-composed melodies have a stronger impact than pre-composed or downloaded messages, similar to crafted pieces of art offered to a beloved person. We then present findings from a user study that assesses the implementation of a functional prototype and the adequacy of the system for emotional communication.},
acmid = {1613897},
articleno = {30},
doi = {10.1145/1613858.1613897},
isbn = {978-1-60558-281-8},
keywords = {composer, emotion sharing, mobile phone, synchronous},
location = {Bonn, Germany},
numpages = {4},
timestamp = {2009.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/sahami2009mobilehci.pdf},
}
F. Alt, A. Schmidt, R. Atterer, and P. Holleis. Bringing Web 2.0 to the Old Web: A Platform for Parasitic Applications. In Proceedings of the 12th IFIP TC 13 International Conference on Human-Computer Interaction: Part I (INTERACT’09), Springer-Verlag, Berlin, Heidelberg, 2009, p. 405–418. doi:10.1007/978-3-642-03655-2_44
[BibTeX] [Abstract] [PDF]
It is possible to create interactive, responsive web applications that allow user-generated contributions. However, the relevant technologies have to be explicitly deployed by the authors of the web pages. In this work we present the concept of parasitic and symbiotic web applications which can be deployed on arbitrary web pages by means of a proxy-based application platform. Such applications are capable of inserting, editing and deleting the content of web pages. We use an HTTP proxy in order to insert JavaScript code on each web page that is delivered from the web server to the browser. Additionally we use a database server hosting user-generated scripts as well as high-level APIs allowing for implementing customized web applications. Our approach is capable of cooperating with existing web pages by using shared standards (e.g. formatting of the structure on DOM level) and common APIs but also allows for user-generated (parasitic) applications on arbitrary web pages without the need for cooperation by the page owner.
@InProceedings{alt2009interact,
author = {Alt, Florian and Schmidt, Albrecht and Atterer, Richard and Holleis, Paul},
booktitle = {{Proceedings of the 12th IFIP TC 13 International Conference on Human-Computer Interaction: Part I}},
title = {{Bringing Web 2.0 to the Old Web: A Platform for Parasitic Applications}},
year = {2009},
address = {Berlin, Heidelberg},
month = {sep},
note = {alt2009interact},
pages = {405--418},
publisher = {Springer-Verlag},
series = {INTERACT'09},
abstract = {It is possible to create interactive, responsive web applications that allow user-generated contributions. However, the relevant technologies have to be explicitly deployed by the authors of the web pages. In this work we present the concept of parasitic and symbiotic web applications which can be deployed on arbitrary web pages by means of a proxy-based application platform. Such applications are capable of inserting, editing and deleting the content of web pages. We use an HTTP proxy in order to insert JavaScript code on each web page that is delivered from the web server to the browser. Additionally we use a database server hosting user-generated scripts as well as high-level APIs allowing for implementing customized web applications. Our approach is capable of cooperating with existing web pages by using shared standards (e.g. formatting of the structure on DOM level) and common APIs but also allows for user-generated (parasitic) applications on arbitrary web pages without the need for cooperation by the page owner.},
acmid = {1615858},
doi = {10.1007/978-3-642-03655-2_44},
isbn = {978-3-642-03654-5},
location = {Uppsala, Sweden},
numpages = {14},
timestamp = {2009.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2009interact.pdf},
}
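The core mechanism of this paper, an HTTP proxy that injects JavaScript into every page delivered from the web server to the browser, can be sketched in a few lines. This minimal sketch handles plain HTTP GET only and uses a hypothetical script URL; error handling, HTTPS, and the script-hosting database are omitted:

from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.request import urlopen

INJECTED = b'<script src="http://example.test/parasite.js"></script>'  # hypothetical script host

class InjectingProxy(BaseHTTPRequestHandler):
    def do_GET(self):
        # In proxy mode the request line carries the absolute URL of the target page.
        with urlopen(self.path) as upstream:
            body = upstream.read()
        # Insert the script tag just before the closing body tag.
        body = body.replace(b"</body>", INJECTED + b"</body>", 1)
        self.send_response(200)
        self.send_header("Content-Type", "text/html; charset=utf-8")
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()
        self.wfile.write(body)

if __name__ == "__main__":
    HTTPServer(("127.0.0.1", 8080), InjectingProxy).serve_forever()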
F. Alt, C. Evers, and A. Schmidt. Mobile Public Display Systems. In Adjunct Proceedings of the Tenth Workshop on Mobile Computing, Systems, and Applications (HotMobile’09), Santa Cruz, CA, USA, 2009.
[BibTeX] [PDF]
@InProceedings{alt2009hotmobileadj,
author = {Florian Alt AND Christoph Evers AND Albrecht Schmidt},
booktitle = {{Adjunct Proceedings of the Tenth Workshop on Mobile Computing, Systems, and Applications}},
title = {{Mobile Public Display Systems}},
year = {2009},
address = {Santa Cruz, CA, USA},
month = {jun},
note = {alt2009hotmobileadj},
series = {HotMobile'09},
owner = {flo},
timestamp = {2009.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2009hotmobileadj.pdf},
}
F. Alt, A. Schmidt, and C. Evers. Mobile Contextual Displays. In Proceedings of the First International Workshop on Pervasive Advertising (PerAd’09), Nara, Japan, 2009.
[BibTeX] [PDF]
@InProceedings{alt2009perad1,
author = {Florian Alt AND Albrecht Schmidt AND Christoph Evers},
booktitle = {{Proceedings of the First International Workshop on Pervasive Advertising}},
title = {{Mobile Contextual Displays}},
year = {2009},
address = {Nara, Japan},
month = {jun},
note = {alt2009perad1},
series = {PerAd'09},
owner = {flo},
timestamp = {2009.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2009perad1.pdf},
}
F. Alt, C. Evers, and A. Schmidt. Users’ View on Context-Sensitive Car Advertisements. In Proceedings of the 7th International Conference on Pervasive Computing (Pervasive’09), Springer-Verlag, Berlin, Heidelberg, 2009, p. 9–16. doi:10.1007/978-3-642-01516-8_2
[BibTeX] [Abstract] [PDF]
Cars are ubiquitous and offer large and often highly visible surfaces that can be used as advertising space. Until now, advertising in this domain has focused on commercial vehicles, and advertisements have been painted on and were therefore static, with the exception of car-mounted displays that offer dynamic content. With new display technologies, we expect static displays or uniformly-painted surfaces (e.g. onto car doors or the sides of vans and trucks) to be replaced with embedded dynamic displays. We also see an opportunity for advertisements to be placed on non-commercial cars: results of our online survey with 187 drivers show that more than half of them have an interest in displaying advertising on their cars under two conditions: (1) they will receive financial compensation, and (2) there will be a means for them to influence the type of advertisements shown. Based on these findings, as well as further interviews with car owners and a car fleet manager, we discuss the requirements for a context-aware advertising platform, including a context-advertising editor and contextual content distribution system. We describe an implementation of the system that includes components for car owners to describe their preferences and for advertisers to contextualize their ad content and distribution mechanism.
@InProceedings{alt2009pervasive,
author = {Alt, Florian and Evers, Christoph and Schmidt, Albrecht},
booktitle = {{Proceedings of the 7th International Conference on Pervasive Computing}},
title = {{Users' View on Context-Sensitive Car Advertisements}},
year = {2009},
address = {Berlin, Heidelberg},
month = {jun},
note = {alt2009pervasive},
pages = {9--16},
publisher = {Springer-Verlag},
series = {Pervasive'09},
abstract = {Cars are ubiquitous and offer large and often highly visible surfaces that can be used as advertising space. Until now, advertising in this domain has focused on commercial vehicles, and advertisements have been painted on and were therefore static, with the exception of car-mounted displays that offer dynamic content. With new display technologies, we expect static displays or uniformly-painted surfaces (e.g. onto car doors or the sides of vans and trucks) to be replaced with embedded dynamic displays. We also see an opportunity for advertisements to be placed on non-commercial cars: results of our online survey with 187 drivers show that more than half of them have an interest in displaying advertising on their cars under two conditions: (1) they will receive financial compensation, and (2) there will be a means for them to influence the type of advertisements shown. Based on these findings, as well as further interviews with car owners and a car fleet manager, we discuss the requirements for a context-aware advertising platform, including a context-advertising editor and contextual content distribution system. We describe an implementation of the system that includes components for car owners to describe their preferences and for advertisers to contextualize their ad content and distribution mechanism.},
acmid = {1560007},
doi = {10.1007/978-3-642-01516-8_2},
isbn = {978-3-642-01515-1},
location = {Nara, Japan},
numpages = {8},
timestamp = {2009.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2009pervasive.pdf},
}
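A minimal sketch of the contextual matching such a platform implies: advertisers attach context constraints to each ad, car owners approve ad categories, and content is distributed only where both sides agree. All field names are illustrative assumptions rather than the paper's schema:

from dataclasses import dataclass, field

@dataclass
class Ad:
    ad_id: str
    categories: set[str]  # e.g. {"food", "retail"}
    regions: set[str]     # advertiser-defined target regions

@dataclass
class CarContext:
    region: str                                      # where the car currently drives
    accepted: set[str] = field(default_factory=set)  # owner-approved ad categories

def eligible_ads(car: CarContext, ads: list[Ad]) -> list[str]:
    # An ad qualifies only if its region matches and the owner accepts its category.
    return [a.ad_id for a in ads
            if car.region in a.regions and a.categories & car.accepted]

ads = [Ad("pizza-01", {"food"}, {"munich"}), Ad("bank-07", {"finance"}, {"munich"})]
car = CarContext(region="munich", accepted={"food", "retail"})
print(eligible_ads(car, ads))  # -> ['pizza-01']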

2008

A. Schmidt, F. Alt, P. Holleis, J. Mueller, and A. Krueger. Creating Log Files and Click Streams for Advertisements in Physical Space. In Adjunct Proceedings of the 10th International Conference on Ubiquitous Computing (Ubicomp’08), Seoul, South Korea, 2008, p. 28–29.
[BibTeX] [Abstract] [PDF]
Poster advertisement has a long tradition and is transforming rapidly into digital media. In this paper we provide an overview of how sensing can be used to create online and up-to-date information about potential viewers. We assess what application domains can benefit from continuous monitoring of visitors. As measuring with simple sensors is inherently error-prone, we suggest the notion of comparative advertising power, which compares the number of potential viewers in different locations. We address user acceptance and privacy concerns and show technical mechanisms to increase privacy.
@Conference{schmidt2008ubicompadj,
author = {Schmidt, A. and Alt, F. and Holleis, P. and Mueller, J. and Krueger, A.},
booktitle = {{Adjunct Proceedings of the 10th International Conference on Ubiquitous Computing}},
title = {{Creating Log Files and Click Streams for Advertisements in Physical Space}},
year = {2008},
address = {Seoul, South Korea},
note = {schmidt2008ubicompadj},
pages = {28--29},
series = {Ubicomp'08},
abstract = {Poster advertisement has a long tradition and is transforming rapidly into digital media. In this paper we provide an overview of how sensing can be used to create online and up-to-date information about potential viewers. We assess what application domains can benefit from continuous monitoring of visitors. As measuring with simple sensors is inherently error-prone, we suggest the notion of comparative advertising power, which compares the number of potential viewers in different locations. We address user acceptance and privacy concerns and show technical mechanisms to increase privacy.},
timestamp = {2008.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schmidt2008ubicompadj.pdf},
}
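The notion of comparative advertising power can be made concrete with a toy calculation: instead of trusting error-prone absolute counts, locations are ranked by their sensed viewer counts relative to a baseline location. The numbers below are invented for illustration:

viewer_counts = {"station": 420, "mall": 310, "campus": 95}  # sensed potential viewers

baseline = viewer_counts["campus"]
advertising_power = {loc: n / baseline for loc, n in viewer_counts.items()}
# station: 4.42x, mall: 3.26x, campus: 1.00x the baseline audience
for loc, power in sorted(advertising_power.items(), key=lambda kv: -kv[1]):
    print(f"{loc}: {power:.2f}x")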

2007

F. Alt, A. Sahami Shirazi, and A. Schmidt. Monitoring Heartbeat per Day to Motivate Increasing Physical Activity. In Proceedings of the Ubicomp Workshop on Interaction with Ubiquitous Wellness and Healthcare Applications (UbiWell’07), Innsbruck, Austria, 2007.
[BibTeX] [Abstract] [PDF]
Physical activity is one of the most basic human functions, is essential for physical and mental health, and has a major beneficial effect on chronic diseases such as heart disease and stroke. Creating awareness of physical activity has been one of the focuses of research in recent years. One common way of creating this awareness is monitoring the number of steps taken by a person and comparing it with the minimum number of steps s/he needs. In this paper we suggest that, instead, the heartbeat can be monitored to create awareness of physical activity.
@InProceedings{alt2008ubicompadj,
author = {Alt, Florian AND Sahami Shirazi, Alireza AND Schmidt, Albrecht},
booktitle = {{Proceedings of the Ubicomp Workshop on Interaction with Ubiquitous Wellness and Healthcare Applications}},
title = {{Monitoring Heartbeat per Day to Motivate Increasing Physical Activity}},
year = {2007},
address = {Innsbruck, Austria},
note = {alt2008ubicompadj},
series = {UbiWell'07},
abstract = {Physical activity is one of the most basic human functions, is essential for physical and mental health, and has a major beneficial effect on chronic diseases such as heart disease and stroke. Creating awareness of physical activity has been one of the focuses of research in recent years. One common way of creating this awareness is monitoring the number of steps taken by a person and comparing it with the minimum number of steps s/he needs. In this paper we suggest that, instead, the heartbeat can be monitored to create awareness of physical activity.},
owner = {flo},
timestamp = {2007.03.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2008ubicompadj.pdf},
}
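The underlying arithmetic is straightforward: the daily heartbeat total is the sum of heart rate times time over the day. A worked sketch with invented sample values (the 86,400-beat baseline corresponds to a constant 60 bpm and is not a figure from the paper):

samples = [  # (average bpm, minutes at that rate)
    (58, 8 * 60),   # sleep
    (72, 14 * 60),  # daily routine
    (80, 45),       # commute on foot
    (110, 45),      # brisk walk
    (135, 30),      # workout
]

beats_today = sum(bpm * minutes for bpm, minutes in samples)
baseline = 60 * 24 * 60  # 86,400 beats: a constant 60 bpm for 24 hours
print(f"{beats_today} of {baseline} beats ({beats_today / baseline:.0%})")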

2006

A. Schmidt, F. Alt, D. Wilhelm, J. Niggemann, and H. Feussner. Experimenting with Ubiquitous Computing Technologies in Productive Environments. Elektrotechnik und Informationstechnik, vol. 123, iss. 4, pp. 135-139, 2006.
[BibTeX] [Abstract] [PDF]
Ubiquitous computing techniques are ideal tools to bring new solutions to environments which are otherwise quite resistant to rapid change. In this paper we present techniques to carry out experiments in the very heterogeneous environment of a hospital’s decision-making conference, the “tumour board”. Introducing the concept of surface interaction, we demonstrate how information from various sources such as X-ray film, slide presentations, and projections of CT scans, together with oral comments and typed notes, can be captured and made available for surgeons’ use in the operating theatre, without interfering with the “old” way of holding the meeting and without putting any extra burden on the hospital staff.
@Article{schmidt2006elektrotechnik,
author = {Albrecht Schmidt and Florian Alt and Dirk Wilhelm and J{\"o}rg Niggemann and Hubertus Feussner},
journal = {{Elektrotechnik und Informationstechnik}},
title = {{Experimenting with Ubiquitous Computing Technologies in Productive Environments}},
year = {2006},
note = {schmidt2006elektrotechnik},
number = {4},
pages = {135-139},
volume = {123},
abstract = {Ubiquitous computing techniques are ideal tools to bring new solutions to environments which are otherwise quite resistant to rapid change. In this paper we present techniques to carry out experiments in the very heterogeneous environment of a hospital’s decision-making conference, the “tumour board”. Introducing the concept of surface interaction, we demonstrate how information from various sources such as X-ray film, slide presentations, and projections of CT scans, together with oral comments and typed notes, can be captured and made available for surgeons’ use in the operating theatre, without interfering with the “old” way of holding the meeting and without putting any extra burden on the hospital staff.},
timestamp = {2006.05.07},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schmidt2006elektrotechnik.pdf},
}