## Publications

### 2021

F. Alt. Pervasive security and privacy—a brief reflection on challenges and opportunities. IEEE Pervasive Computing, vol. 20, iss. 4, p. 5, 2021. doi:10.1109/MPRV.2021.3110539
[BibTeX] [PDF]
@Article{alt2021ieeepvc,
  author    = {Florian Alt},
  journal   = {IEEE Pervasive Computing},
  title     = {{Pervasive Security and Privacy---A Brief Reflection on Challenges and Opportunities}},
  year      = {2021},
  issn      = {1558-2590},
  month     = dec,
  note      = {alt2021ieeepvc},
  number    = {4},
  pages     = {5},
  volume    = {20},
  doi       = {10.1109/MPRV.2021.3110539},
  timestamp = {2021.12.09},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/alt2021ieeepvc.pdf},
}
F. Alt and S. Schneegass. Beyond passwords—challenges and opportunities of future authentication. IEEE Security & Privacy, 2021.
[BibTeX] [PDF]
@Article{alt2021ieeesp,
  author    = {Florian Alt and Stefan Schneegass},
  journal   = {IEEE Security \& Privacy},
  title     = {{Beyond Passwords---Challenges and Opportunities of Future Authentication}},
  year      = {2021},
  note      = {alt2021ieeesp},
  timestamp = {2021.12.09},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/alt2021ieeesp.pdf},
}
R. Rivu, V. Mäkelä, S. Prange, S. D. Rodriguez, R. Piening, Y. Zhou, K. Köhle, K. Pfeuffer, Y. Abdelrahman, M. Hoppe, A. Schmidt, and F. Alt. Remote VR Studies – A Framework for Running Virtual Reality Studies Remotely Via Participant-Owned HMDs. ACM Transactions on Computer-Human Interaction (ToCHI), 2021.
[BibTeX] [PDF]
@Article{rivu2021tochi,
  author    = {Radiah Rivu and Ville M{\"a}kel{\"a} and Sarah Prange and Sarah Delgado Rodriguez and Robin Piening and Yumeng Zhou and Kay K{\"o}hle and Ken Pfeuffer and Yomna Abdelrahman and Matthias Hoppe and Albrecht Schmidt and Florian Alt},
  journal   = {{ACM Transactions on Computer-Human Interaction (ToCHI)}},
  title     = {{Remote VR Studies -- A Framework for Running Virtual Reality Studies Remotely Via Participant-Owned HMDs}},
  year      = {2021},
  note      = {rivu2021tochi},
  address   = {New York, NY, USA},
  publisher = {ACM},
  timestamp = {2021.12.01},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/rivu2021tochi.pdf},
}
M. Froehlich, P. Hulm, and F. Alt. Under Pressure – A User-Centered Threat Model for Cryptocurrency Owners. In Proceedings of the Fourth International Conference on Blockchain Technology and Applications (ICBTA ’21), 2021.
[BibTeX] [Abstract] [PDF] [Video]
Cryptocurrencies have gained popularity in recent years. However, for many users, keeping ownership of their cryptocurrency is a complex task. News reports frequently bear witness to scams, hacked exchanges, and fortunes beyond retrieval. However, we lack a systematic understanding of user-centered cryptocurrency threats, as causes leading to loss are scattered across publications. To address this gap, we conducted a focus group (n=6) and an expert elicitation study (n=25) following a three-round Delphi process with a heterogeneous group of blockchain and security experts from academia and industry. We contribute the first systematic overview of threats cryptocurrency users are exposed to and propose six overarching categories. Our work is complemented by a discussion on how the human-computer-interaction community can address these threats and how practitioners can use the model to understand situations in which users might find themselves under the pressure of an attack to ultimately engineer more secure systems.
@InProceedings{froehlich2021icbta,
  author    = {Michael Froehlich and Philipp Hulm and Florian Alt},
  booktitle = {{Proceedings of the Fourth International Conference on Blockchain Technology and Applications}},
  title     = {{Under Pressure -- A User-Centered Threat Model for Cryptocurrency Owners}},
  year      = {2021},
  note      = {froehlich2021icbta},
  series    = {ICBTA '21},
  abstract  = {Cryptocurrencies have gained popularity in recent years. However, for many users, keeping ownership of their cryptocurrency is a complex task. News reports frequently bear witness to scams, hacked exchanges, and fortunes beyond retrieval. However, we lack a systematic understanding of user-centered cryptocurrency threats, as causes leading to loss are scattered across publications. To address this gap, we conducted a focus group (n=6) and an expert elicitation study (n=25) following a three-round Delphi process with a heterogeneous group of blockchain and security experts from academia and industry. We contribute the first systematic overview of threats cryptocurrency users are exposed to and propose six overarching categories. Our work is complemented by a discussion on how the human-computer-interaction community can address these threats and how practitioners can use the model to understand situations in which users might find themselves under the pressure of an attack to ultimately engineer more secure systems.},
  location  = {Virtual},
  owner     = {florian},
  timestamp = {2021.12.01},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/froehlich2021icbta.pdf},
  video     = {froehlich2021icbta},
}
M. Braun, F. Weber, and F. Alt. Affective Automotive User Interfaces – Reviewing the State of Driver Affect Research and Emotion Regulation in the Car. ACM Computing Surveys, 2021.
[BibTeX] [Abstract] [PDF]
Affective technology offers exciting opportunities to improve road safety by catering to human emotions. Modern car interiors enable the contactless detection of user states, paving the way for a systematic promotion of safe driver behavior through emotion regulation. We review the current literature regarding the impact of emotions on driver behavior and analyze the state of emotion regulation approaches in the car. We summarize challenges for affective interaction in form of technological hurdles and methodological considerations, as well as opportunities to improve road safety by reinstating drivers into an emotionally balanced state. The purpose of this review is to outline the community’s combined knowledge for interested researchers, to provide a focussed introduction for practitioners, raise awareness for cultural aspects, and to identify future directions for affective interaction in the car.
@Article{braun2021csur,
  author     = {Michael Braun and Florian Weber and Florian Alt},
  journal    = {ACM Computing Surveys},
  title      = {{Affective Automotive User Interfaces -- Reviewing the State of Driver Affect Research and Emotion Regulation in the Car}},
  year       = {2021},
  note       = {braun2021csur},
  abstract   = {Affective technology offers exciting opportunities to improve road safety by catering to human emotions. Modern car interiors enable the contactless detection of user states, paving the way for a systematic promotion of safe driver behavior through emotion regulation. We review the current literature regarding the impact of emotions on driver behavior and analyze the state of emotion regulation approaches in the car. We summarize challenges for affective interaction in form of technological hurdles and methodological considerations, as well as opportunities to improve road safety by reinstating drivers into an emotionally balanced state. The purpose of this review is to outline the community's combined knowledge for interested researchers, to provide a focussed introduction for practitioners, raise awareness for cultural aspects, and to identify future directions for affective interaction in the car.},
  address    = {New York, NY, USA},
  issue_date = {March 2021},
  numpages   = {25},
  publisher  = {Association for Computing Machinery},
  timestamp  = {2021.12.01},
  url        = {http://www.florian-alt.org/unibw/wp-content/publications/braun2021csur.pdf},
}
A. Nussbaum, J. Schuette, L. Hao, H. Schulzrinne, and F. Alt. Tremble: TRansparent Emission Monitoring with BLockchain Endorsement. In Proceedings of the 21 IEEE International Conference on Internet of Things (iThings’21), IEEE, 2021.
[BibTeX] [Abstract] [PDF]
Since the monitoring of environmental emissions is mostly in the hands of regulatory authorities, collected data may not be easily observed by the interested public. Centrally stored data may also tempt the authorities or others to manipulate the historical record for political or liability reasons. To enable timely, transparent and integrity-protected collection and presentation of emission data, we propose and implement Tremble, an emission monitoring system based on blockchain and IoT sensors. Tremble employs a hybrid storage approach to lower the cost of storage compared to using a pure blockchain without losing data integrity. It provides web interfaces and visualizations for end users to query emission values they are concerned about. Qualitative and quantitative studies involving a total of 62 subjects demonstrate the usability of the system.
@InProceedings{nussbaum2021ithings,
  author    = {Alexander Nussbaum and Johannes Schuette and Luoyao Hao and Henning Schulzrinne and Florian Alt},
  booktitle = {Proceedings of the 21 IEEE International Conference on Internet of Things},
  title     = {{Tremble: TRansparent Emission Monitoring with BLockchain Endorsement}},
  year      = {2021},
  note      = {nussbaum2021ithings},
  publisher = {IEEE},
  series    = {iThings'21},
  abstract  = {Since the monitoring of environmental emissions is mostly in the hands of regulatory authorities, collected data may not be easily observed by the interested public. Centrally stored data may also tempt the authorities or others to manipulate the historical record for political or liability reasons. To enable timely, transparent and integrity-protected collection and presentation of emission data, we propose and implement Tremble, an emission monitoring system based on blockchain and IoT sensors. Tremble employs a hybrid storage approach to lower the cost of storage compared to using a pure blockchain without losing data integrity. It provides web interfaces and visualizations for end users to query emission values they are concerned about. Qualitative and quantitative studies involving a total of 62 subjects demonstrate the usability of the system.},
  location  = {Melbourne, Australia},
  timestamp = {2021.11.30},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/nussbaum2021ithings.pdf},
}
K. Marky, S. Prange, M. Mühlhäuser, and F. Alt. Roles Matter! Understanding Differences in the Privacy Mental Models of Smart Home Visitors and Residents. In Proceedings of the 19th International Conference on Mobile and Ubiquitous Multimedia (MUM’21), ACM, New York, NY, USA, 2021.
[BibTeX] [Abstract] [PDF]
In this paper, we contribute an in-depth study of the mental models of various roles in smart home ecosystems. In particular, we compared mental models regarding data collection among residents (primary users) and visitors of a smart home in a qualitative study (N=30) to better understand how bystanders’ specific privacy needs can be addressed. Our results suggest that bystanders have a limited understanding of how smart devices collect and store sensitive data about them. Misconceptions in bystanders’ mental models result in missing awareness and ultimately limit their ability to protect their privacy. We discuss the limitations of existing solutions and challenges for the design of future smart home environments that reflect the privacy concerns of users and bystanders alike, meant to inform the design of future privacy interfaces for IoT devices.
@InProceedings{marky2021mum,
  author    = {Marky, Karola and Prange, Sarah and M{\"u}hlh{\"a}user, Max and Alt, Florian},
  booktitle = {{Proceedings of the 19th International Conference on Mobile and Ubiquitous Multimedia}},
  title     = {{Roles Matter! Understanding Differences in the Privacy Mental Models of Smart Home Visitors and Residents}},
  year      = {2021},
  address   = {New York, NY, USA},
  note      = {marky2021mum},
  publisher = {ACM},
  series    = {MUM'21},
  abstract  = {In this paper, we contribute an in-depth study of the mental models of various roles in smart home ecosystems. In particular, we compared mental models regarding data collection among residents (primary users) and visitors of a smart home in a qualitative study (N=30) to better understand how bystanders' specific privacy needs can be addressed. Our results suggest that bystanders have a limited understanding of how smart devices collect and store sensitive data about them. Misconceptions in bystanders' mental models result in missing awareness and ultimately limit their ability to protect their privacy. We discuss the limitations of existing solutions and challenges for the design of future smart home environments that reflect the privacy concerns of users and bystanders alike, meant to inform the design of future privacy interfaces for IoT devices.},
  location  = {Leuven, Belgium},
  timestamp = {2021.11.25},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/marky2021mum.pdf},
}
K. Pfeuffer, A. Dinc, J. Obernolte, R. Rivu, Y. Abdrabou, F. Schelter, Y. Abdelrahman, and F. Alt. Bi-3D: Bi-Manual Pen-and-Touch Interaction for 3D Manipulation on Tablets. In Proceedings of the 34th ACM Symposium on User Interface Software and Technology (UIST ’21), Association for Computing Machinery, New York, NY, USA, 2021.
[BibTeX] [Abstract] [PDF] [Video]
Tablets are attractive for design work anywhere, but 3D manipulations are notoriously difficult. We explore how engaging the stylus and multi-touch in concert can render such tasks easier. We introduce Bi-3D, an interaction concept where touch gestures are combined with 2D pen commands for 3D manipulation. For example, for a fast and intuitive 3D drag & drop technique: the pen drags the object on-screen, and parallel pinch-to-zoom moves it in the third dimension. In this paper, we describe the Bi-3D design space, crossing two-handed input and the degrees-of-freedom (DOF) of 3D manipulation and navigation tasks. We demonstrate sketching and manipulation tools in a prototype 3D design application, where users can fluidly combine 3D operations through alternating and parallel use of the modalities. We evaluate the core technique, bi-manual 3DOF input, against widget and mid-air baselines in an object movement task. We find that Bi-3D is a fast and practical way for multi-dimensional manipulation of graphical objects, promising to facilitate 3D design on stylus and tablet devices.
@InProceedings{pfeuffer2021uist,
  author    = {Ken Pfeuffer and Abdullatif Dinc and Jan Obernolte and Radiah Rivu and Yasmeen Abdrabou and Franziska Schelter and Yomna Abdelrahman and Florian Alt},
  booktitle = {{Proceedings of the 34th ACM Symposium on User Interface Software and Technology}},
  title     = {{Bi-3D: Bi-Manual Pen-and-Touch Interaction for 3D Manipulation on Tablets}},
  year      = {2021},
  address   = {New York, NY, USA},
  note      = {pfeuffer2021uist},
  publisher = {Association for Computing Machinery},
  series    = {UIST '21},
  abstract  = {Tablets are attractive for design work anywhere, but 3D manipulations are notoriously difficult. We explore how engaging the stylus and multi-touch in concert can render such tasks easier. We introduce Bi-3D, an interaction concept where touch gestures are combined with 2D pen commands for 3D manipulation. For example, for a fast and intuitive 3D drag & drop technique: the pen drags the object on-screen, and parallel pinch-to-zoom moves it in the third dimension. In this paper, we describe the Bi-3D design space, crossing two-handed input and the degrees-of-freedom (DOF) of 3D manipulation and navigation tasks. We demonstrate sketching and manipulation tools in a prototype 3D design application, where users can fluidly combine 3D operations through alternating and parallel use of the modalities. We evaluate the core technique, bi-manual 3DOF input, against widget and mid-air baselines in an object movement task. We find that Bi-3D is a fast and practical way for multi-dimensional manipulation of graphical objects, promising to facilitate 3D design on stylus and tablet devices.},
  location  = {virtual},
  timestamp = {2021.10.10},
  url       = {http://florian-alt.org/unibw/wp-content/publications/pfeuffer2021uist.pdf},
  video     = {pfeuffer2021uist},
}
F. Alt, D. Buschek, D. Heuss, and J. Müller. Orbuculum – Predicting When Users Intend To Leave Large Public Displays. Proc. ACM Interact. Mob. Wearable Ubiquitous Technol., 2021.
[BibTeX] [Abstract] [PDF]
We present a system, predicting the point in time when users of a public display are about to leave. The ability to react to users’ intention to leave is valuable for researchers and practitioners alike: users can be presented additional content with the goal to maximize interaction times; they can be offered a discount coupon for redemption in a nearby store hence enabling new business models; or feedback can be collected from users right after they have finished interaction without interrupting their task. Our research consists of multiple steps: (1) We identified features that hint at users’ intention to leave from observations and video logs. (2) We implemented a system capable of detecting such features from Microsoft Kinect’s skeleton data and subsequently make a prediction. (3) We trained and deployed a prediction system with a Quiz game that reacts when users are about to leave (N=249), achieving an accuracy of 78%. The majority of users indeed reacted to the presented intervention.
@Article{alt2021imwut,
  author     = {Florian Alt and Daniel Buschek and David Heuss and J{\"o}rg M{\"u}ller},
  journal    = {Proc. ACM Interact. Mob. Wearable Ubiquitous Technol.},
  title      = {{Orbuculum -- Predicting When Users Intend To Leave Large Public Displays}},
  year       = {2021},
  note       = {alt2021imwut},
  abstract   = {We present a system, predicting the point in time when users of a public display are about to leave. The ability to react to users' intention to leave is valuable for researchers and practitioners alike: users can be presented additional content with the goal to maximize interaction times; they can be offered a discount coupon for redemption in a nearby store hence enabling new business models; or feedback can be collected from users right after they have finished interaction without interrupting their task. Our research consists of multiple steps: (1) We identified features that hint at users' intention to leave from observations and video logs. (2) We implemented a system capable of detecting such features from Microsoft Kinect's skeleton data and subsequently make a prediction. (3) We trained and deployed a prediction system with a Quiz game that reacts when users are about to leave (N=249), achieving an accuracy of 78%. The majority of users indeed reacted to the presented intervention.},
  address    = {New York, NY, USA},
  articleno  = {47},
  issue_date = {Mar 2021},
  numpages   = {24},
  publisher  = {Association for Computing Machinery},
  timestamp  = {2021.10.01},
  url        = {http://florian-alt.org/unibw/wp-content/publications/alt2021imwut.pdf},
}
A. Saad, J. Liebers, U. Gruenefeld, F. Alt, and S. Schneegass. Understanding Bystanders’ Tendency to Shoulder Surf Smartphones Using 360-degree Videos in Virtual Reality. In Proceedings of the 22nd International Conference on Human-Computer Interaction with Mobile Devices and Services (MobileHCI ’21), ACM, New York, NY, USA, 2021.
[BibTeX] [Abstract] [PDF]
Shoulder surfing is an omnipresent risk for smartphone users. However, investigating these attacks in the wild is difficult because of either privacy concerns, lack of consent, or the fact that asking for consent would influence people’s behavior (e.g., they could try to avoid looking at smartphones). Thus, we propose utilizing 360-degree videos in Virtual Reality (VR), recorded in staged real-life situations on public transport. Despite differences between perceiving videos in VR and experiencing real-world situations, we believe this approach to allow novel insights on observers’ tendency to shoulder surf another person’s phone authentication and interaction to be gained. By conducting a study (N=16), we demonstrate that a better understanding of shoulder surfers’ behavior can be obtained by analyzing gaze data during video watching and comparing it to post-hoc interview responses. On average, participants looked at the phone for about 11% of the time it was visible and could remember half of the applications used.
@InProceedings{saad2021mobilehci,
  author        = {Alia Saad and Jonathan Liebers and Uwe Gruenefeld and Florian Alt and Stefan Schneegass},
  booktitle     = {{Proceedings of the 22nd International Conference on Human-Computer Interaction with Mobile Devices and Services}},
  title         = {{Understanding Bystanders' Tendency to Shoulder Surf Smartphones Using 360-degree Videos in Virtual Reality}},
  year          = {2021},
  address       = {New York, NY, USA},
  note          = {saad2021mobilehci},
  publisher     = {ACM},
  series        = {MobileHCI '21},
  abstract      = {Shoulder surfing is an omnipresent risk for smartphone users. However, investigating these attacks in the wild is difficult because of either privacy concerns, lack of consent, or the fact that asking for consent would influence people's behavior (e.g., they could try to avoid looking at smartphones). Thus, we propose utilizing 360-degree videos in Virtual Reality (VR), recorded in staged real-life situations on public transport. Despite differences between perceiving videos in VR and experiencing real-world situations, we believe this approach to allow novel insights on observers' tendency to shoulder surf another person's phone authentication and interaction to be gained. By conducting a study (N=16), we demonstrate that a better understanding of shoulder surfers' behavior can be obtained by analyzing gaze data during video watching and comparing it to post-hoc interview responses. On average, participants looked at the phone for about 11% of the time it was visible and could remember half of the applications used.},
  location      = {Toulouse, France},
  timestamp     = {2021.09.20},
  internal-note = {review: entry lacks the url field its [PDF] link implies; siblings use .../publications/<key>.pdf -- TODO confirm and add},
}
S. Prange, C. George, and F. Alt. Design Considerations for Usable Authentication in Smart Homes. In Proceedings of the Conference on Mensch Und Computer (MuC ’21), Association for Computing Machinery, New York, NY, USA, 2021.
[BibTeX] [PDF]
@InProceedings{prange2021muc,
  author    = {Sarah Prange and Ceenu George and Florian Alt},
  booktitle = {{Proceedings of the Conference on Mensch Und Computer}},
  title     = {{Design Considerations for Usable Authentication in Smart Homes}},
  year      = {2021},
  address   = {New York, NY, USA},
  note      = {prange2021muc},
  publisher = {Association for Computing Machinery},
  series    = {MuC '21},
  timestamp = {2021.09.06},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/prange2021muc.pdf},
}
R. Rivu, V. Mäkelä, M. Hassib, Y. Abdelrahman, and F. Alt. Exploring how Saliency Affects Attention in Virtual Reality. In Proceedings of the 18th IFIP TC 13 International Conference on Human-Computer Interaction (INTERACT ’21), Springer, Berlin-Heidelberg, Germany, 2021.
[BibTeX] [Abstract] [PDF]
In this paper, we investigate how changes in the saliency of the Virtual Environment (VE) affect our visual attention during different tasks. We investigate if – similar to the real-world – users are attracted to the most salient regions in the VE. This knowledge will help researchers design optimal VR environments, purposefully direct the attention of users, and avoid unintentional distractions. We conducted a user study (N=30) where participants performed tasks (video watching, object stacking, visual search, waiting) with two different saliency conditions in the virtual environment. Our findings suggest that while participants notice the differences in saliency, their visual attention is not diverted towards the salient regions when they are performing tasks.
@InProceedings{rivu2021interact3,
  author    = {Radiah Rivu and Ville M{\"a}kel{\"a} and Mariam Hassib and Yomna Abdelrahman and Florian Alt},
  booktitle = {{Proceedings of the 18th IFIP TC 13 International Conference on Human-Computer Interaction}},
  title     = {{Exploring how Saliency Affects Attention in Virtual Reality}},
  year      = {2021},
  month     = sep,
  note      = {rivu2021interact3},
  publisher = {Springer},
  series    = {INTERACT '21},
  abstract  = {In this paper, we investigate how changes in the saliency of the Virtual Environment (VE) affect our visual attention during different tasks. We investigate if -- similar to the real world -- users are attracted to the most salient regions in the VE. This knowledge will help researchers design optimal VR environments, purposefully direct the attention of users, and avoid unintentional distractions. We conducted a user study (N=30) where participants performed tasks (video watching, object stacking, visual search, waiting) with two different saliency conditions in the virtual environment. Our findings suggest that while participants notice the differences in saliency, their visual attention is not diverted towards the salient regions when they are performing tasks.},
  day       = {1},
  language  = {English},
  location  = {Bari, Italy},
  owner     = {florian},
  timestamp = {2021.09.03},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/rivu2021interact3.pdf},
}
S. Prange, S. Mayer, M. Bittl, M. Hassib, and F. Alt. Investigating User Perceptions Towards Wearable Mobile Electromyography. In Proceedings of the 18th IFIP TC 13 International Conference on Human-Computer Interaction (INTERACT ’21), Springer, Berlin-Heidelberg, Germany, 2021.
[BibTeX] [Abstract] [PDF]
Wearables capture physiological user data, enabling novel user interfaces that can identify users, adapt their user interface, and contribute to the quantified self. At the same time, little is known about users’ perception of this new technology. In this paper, we present findings from a user study (N=36) in which participants used an electromyography (EMG) wearable and a visualization of the data that can be collected using EMG wearables. We found that participants are highly unaware of what EMG data can reveal about them. Allowing them to explore their physiological data makes them more reluctant to share this data. We conclude with deriving guidelines, to help designers of physiological data-based user interfaces to (a) protect users’ privacy, (b) better inform them, and (c) ultimately support the uptake of this technology.
@InProceedings{prange2021interact,
  author    = {Sarah Prange and Sven Mayer and Maria-Lena Bittl and Mariam Hassib and Florian Alt},
  booktitle = {{Proceedings of the 18th IFIP TC 13 International Conference on Human-Computer Interaction}},
  title     = {{Investigating User Perceptions Towards Wearable Mobile Electromyography}},
  year      = {2021},
  month     = sep,
  note      = {prange2021interact},
  publisher = {Springer},
  series    = {INTERACT '21},
  abstract  = {Wearables capture physiological user data, enabling novel user interfaces that can identify users, adapt their user interface, and contribute to the quantified self. At the same time, little is known about users' perception of this new technology. In this paper, we present findings from a user study (N=36) in which participants used an electromyography (EMG) wearable and a visualization of the data that can be collected using EMG wearables. We found that participants are highly unaware of what EMG data can reveal about them. Allowing them to explore their physiological data makes them more reluctant to share this data. We conclude with deriving guidelines, to help designers of physiological data-based user interfaces to (a) protect users' privacy, (b) better inform them, and (c) ultimately support the uptake of this technology.},
  day       = {1},
  language  = {English},
  location  = {Bari, Italy},
  owner     = {florian},
  timestamp = {2021.09.02},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/prange2021interact.pdf},
}
R. Rivu, Y. Zhou, R. Welsch, V. Mäkelä, and F. Alt. When Friends become Strangers: Understanding the Influence of Avatar Gender On Interpersonal Distance in Virtual Reality. In Proceedings of the 18th IFIP TC 13 International Conference on Human-Computer Interaction (INTERACT ’21), Springer, Berlin-Heidelberg, Germany, 2021.
[BibTeX] [Abstract] [PDF]
In this paper, we investigate how mismatches between biological gender and avatar gender affect interpersonal distance (IPD) in virtual reality (VR). An increasing number of VR experiences and online platforms like Rec Room and VRChat allow users to assume other genders through customized avatars. While the effects of acquaintanceship and gender have been studied with regard to proxemic behavior, the effect of changed genders remains largely unexplored. We conducted a user study (N = 40, friends = 20, strangers = 20) where users played a two-player collaborative game in Rec Room using both male and female avatars. We found that with swapped avatar genders, the preferred distance increased between friends but not between strangers. We discuss how our results can inform researchers and designers in the domain of multi-user VR.
@InProceedings{rivu2021interact2,
  author    = {Radiah Rivu and Yumeng Zhou and Robin Welsch and Ville M{\"a}kel{\"a} and Florian Alt},
  booktitle = {{Proceedings of the 18th IFIP TC 13 International Conference on Human-Computer Interaction}},
  title     = {{When Friends become Strangers: Understanding the Influence of Avatar Gender On Interpersonal Distance in Virtual Reality}},
  year      = {2021},
  month     = sep,
  note      = {rivu2021interact2},
  publisher = {Springer},
  series    = {INTERACT '21},
  abstract  = {In this paper, we investigate how mismatches between biological gender and avatar gender affect interpersonal distance (IPD) in virtual reality (VR). An increasing number of VR experiences and online platforms like Rec Room and VRChat allow users to assume other genders through customized avatars. While the effects of acquaintanceship and gender have been studied with regard to proxemic behavior, the effect of changed genders remains largely unexplored. We conducted a user study (N = 40, friends = 20, strangers = 20) where users played a two-player collaborative game in Rec Room using both male and female avatars. We found that with swapped avatar genders, the preferred distance increased between friends but not between strangers. We discuss how our results can inform researchers and designers in the domain of multi-user VR.},
  day       = {1},
  language  = {English},
  location  = {Bari, Italy},
  owner     = {florian},
  timestamp = {2021.09.02},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/rivu2021interact2.pdf},
}
R. Piening, K. Pfeuffer, A. Esteves, T. Mittermeier, S. Prange, P. Schroeder, and F. Alt. Gaze-adaptive Information Access in AR: Empirical Study and Field-Deployment. In Proceedings of the 18th IFIP TC 13 International Conference on Human-Computer Interaction (INTERACT ’21), Springer, Berlin-Heidelberg, Germany, 2021.
[BibTeX] [Abstract] [PDF]
In this paper, we explore how state-of-the-art methods of emotion elicitation can be adapted in virtual reality (VR). We envision that emotion research could be conducted in VR for various benefits, such as switching study conditions and settings on the fly and conducting studies using stimuli that are not easily accessible in the real world such as to induce fear. To this end, we conducted a user study (N=39) where we measured how different emotion elicitation methods (audio, video, image, autobiographical memory recall) perform in VR compared to the real world. We found that elicitation methods produce largely comparable results between VR and the real world, but overall participants experience slightly stronger valence and arousal in VR. Emotions faded over time following the same pattern in both worlds. Our findings are beneficial to researchers and practitioners studying emotional user interfaces in VR.
@InProceedings{piening2021interact,
author = {Piening, Robin and Pfeuffer, Ken and Esteves, Augusto and Mittermeier, Tim and Prange, Sarah and Schroeder, Philippe and Alt, Florian},
booktitle = {{Proceedings of the 18th IFIP TC 13 International Conference on Human-Computer Interaction}},
title = {{Gaze-adaptive Information Access in AR: Empirical Study and Field-Deployment}},
year = {2021},
month = {4},
note = {piening2021interact},
publisher = {Springer},
series = {INTERACT '21},
abstract = {In this paper, we explore how state-of-the-art methods of emotion elicitation can be adapted in virtual reality (VR). We envision that emotion research could be conducted in VR for various benefits, such as switching study conditions and settings on the fly and conducting studies using stimuli that are not easily accessible in the real world such as to induce fear. To this end, we conducted a user study (N=39) where we measured how different emotion elicitation methods (audio, video, image, autobiographical memory recall) perform in VR compared to the real world. We found that elicitation methods produce largely comparable results between VR and the real world, but overall participants experience slightly stronger valence and arousal in VR. Emotions faded over time following the same pattern in both worlds. Our findings are beneficial to researchers and practitioners studying emotional user interfaces in VR.},
day = {1},
language = {English},
location = {Bari, Italy},
owner = {florian},
timestamp = {2021.09.02},
internal-note = {NOTE(review): abstract text appears copied from rivu2021interact1 (emotion elicitation study), not this gaze-adaptive AR paper -- verify against the published abstract},
url = {http://www.florian-alt.org/unibw/wp-content/publications/piening2021interact.pdf},
}
R. Rivu, R. Jiang, V. Mäkelä, M. Hassib, and F. Alt. Emotion Elicitation Techniques in Virtual Reality. In Proceedings of the 18th IFIP TC 13 International Conference on Human-Computer Interaction (INTERACT ’21), Springer, Berlin-Heidelberg, Germany, 2021.
[BibTeX] [Abstract] [PDF]
In this paper, we explore how state-of-the-art methods of emotion elicitation can be adapted in virtual reality (VR). We envision that emotion research could be conducted in VR for various benefits, such as switching study conditions and settings on the fly and conducting studies using stimuli that are not easily accessible in the real world such as to induce fear. To this end, we conducted a user study (N=39) where we measured how different emotion elicitation methods (audio, video, image, autobiographical memory recall) perform in VR compared to the real world. We found that elicitation methods produce largely comparable results between VR and the real world, but overall participants experience slightly stronger valence and arousal in VR. Emotions faded over time following the same pattern in both worlds. Our findings are beneficial to researchers and practitioners studying emotional user interfaces in VR.
@InProceedings{rivu2021interact1,
author = {Rivu, Radiah and Jiang, Ruoyu and Mäkelä, Ville and Hassib, Mariam and Alt, Florian},
booktitle = {{Proceedings of the 18th IFIP TC 13 International Conference on Human-Computer Interaction}},
title = {{Emotion Elicitation Techniques in Virtual Reality}},
year = {2021},
month = {4},
note = {rivu2021interact1},
publisher = {Springer},
series = {INTERACT '21},
abstract = {In this paper, we explore how state-of-the-art methods of emotion elicitation can be adapted in virtual reality (VR). We envision that emotion research could be conducted in VR for various benefits, such as switching study conditions and settings on the fly and conducting studies using stimuli that are not easily accessible in the real world such as to induce fear. To this end, we conducted a user study (N=39) where we measured how different emotion elicitation methods (audio, video, image, autobiographical memory recall) perform in VR compared to the real world. We found that elicitation methods produce largely comparable results between VR and the real world, but overall participants experience slightly stronger valence and arousal in VR. Emotions faded over time following the same pattern in both worlds. Our findings are beneficial to researchers and practitioners studying emotional user interfaces in VR.},
day = {1},
language = {English},
location = {Bari, Italy},
owner = {florian},
timestamp = {2021.09.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/rivu2021interact1.pdf},
}
M. Froehlich, C. Kobiella, A. Schmidt, and F. Alt. Is It Better With Onboarding? Improving First-Time Cryptocurrency App Experiences. In Proceedings of the 2021 ACM Conference on Designing Interactive Systems (DIS ’21), ACM, New York, NY, USA, 2021.
[BibTeX] [Abstract] [PDF] [Video]
Engaging first-time users of mobile apps is challenging. Onboarding task flows are designed to minimize the drop out of users. To this point, there is little scientific insight into how to design these task flows. We explore this question with a specific focus on financial applications, which pose a particularly high hurdle and require significant trust. We address this question by combining two approaches. We first conducted semi-structured interviews (n=16) exploring users’ meaning-making when engaging with new mobile applications in general. We then prototyped and evaluated onboarding task flows (n=16) for two mobile cryptocurrency apps using the minimalist instruction framework. Our results suggest that well-designed onboarding processes can improve the perceived usability of first-time users for feature-rich mobile apps. We discuss how the expectations users voiced during the interview study can be met by applying instructional design principles and reason that the minimalist instruction framework for mobile onboarding insights presents itself as a useful design method for practitioners to develop onboarding processes and also identify when not to.
@inproceedings{froehlich2021dis2,
author = {Froehlich, Michael and Kobiella, Charlotte and Schmidt, Albrecht and Alt, Florian},
title = {{Is It Better With Onboarding? Improving First-Time Cryptocurrency App Experiences}},
booktitle = {{Proceedings of the 2021 ACM Conference on Designing Interactive Systems}},
series = {DIS '21},
year = {2021},
publisher = {ACM},
address = {New York, NY, USA},
location = {Virtual},
abstract = {Engaging first-time users of mobile apps is challenging. Onboarding task flows are designed to minimize the drop out of users. To this point, there is little scientific insight into how to design these task flows. We explore this question with a specific focus on financial applications, which pose a particularly high hurdle and require significant trust. We address this question by combining two approaches. We first conducted semi-structured interviews (n=16) exploring users' meaning-making when engaging with new mobile applications in general. We then prototyped and evaluated onboarding task flows (n=16) for two mobile cryptocurrency apps using the minimalist instruction framework. Our results suggest that well-designed onboarding processes can improve the perceived usability of first-time users for feature-rich mobile apps. We discuss how the expectations users voiced during the interview study can be met by applying instructional design principles and reason that the minimalist instruction framework for mobile onboarding insights presents itself as a useful design method for practitioners to develop onboarding processes and also identify when not to.},
note = {froehlich2021dis2},
owner = {florian},
timestamp = {2021.06.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/froehlich2021dis2.pdf},
video = {froehlich2021dis2},
}
M. Froehlich, M. Wagenhaus, A. Schmidt, and F. Alt. Don’t Stop Me Now! Exploring Challenges Of First-Time Cryptocurrency Users. In Proceedings of the 2021 ACM Conference on Designing Interactive Systems (DIS ’21), ACM, New York, NY, USA, 2021.
[BibTeX] [Abstract] [PDF] [Video]
Cryptocurrencies have increasingly gained interest in practice and research alike. Current research in the HCI community predominantly focuses on understanding the behavior of existing cryptocurrency users. Little attention has been given to early users and the challenges they encounter. However, understanding how interfaces of cryptocurrency systems support, impede, or even prevent adoption through new users is essential to develop better, more inclusive solutions. To close this gap, we conducted a user study(n=34) exploring challenges first-time cryptocurrency users face. Our analysis reveals that even popular wallets are not designed for novice users’ needs, stopping them when they would be ready to engage with the technology. We identify multiple challenges ranging from general user interface issues to finance and cryptocurrency specific ones. We argue that these challenges can and should be addressed by the HCI community and present implications for building better cryptocurrency systems for novice users.
@inproceedings{froehlich2021dis1,
author = {Froehlich, Michael and Wagenhaus, Maurizio and Schmidt, Albrecht and Alt, Florian},
title = {{Don't Stop Me Now! Exploring Challenges Of First-Time Cryptocurrency Users}},
booktitle = {{Proceedings of the 2021 ACM Conference on Designing Interactive Systems}},
series = {DIS '21},
year = {2021},
publisher = {ACM},
address = {New York, NY, USA},
location = {Virtual},
abstract = {Cryptocurrencies have increasingly gained interest in practice and research alike. Current research in the HCI community predominantly focuses on understanding the behavior of existing cryptocurrency users. Little attention has been given to early users and the challenges they encounter. However, understanding how interfaces of cryptocurrency systems support, impede, or even prevent adoption through new users is essential to develop better, more inclusive solutions. To close this gap, we conducted a user study(n=34) exploring challenges first-time cryptocurrency users face. Our analysis reveals that even popular wallets are not designed for novice users' needs, stopping them when they would be ready to engage with the technology. We identify multiple challenges ranging from general user interface issues to finance and cryptocurrency specific ones. We argue that these challenges can and should be addressed by the HCI community and present implications for building better cryptocurrency systems for novice users.},
note = {froehlich2021dis1},
owner = {florian},
timestamp = {2021.06.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/froehlich2021dis1.pdf},
video = {froehlich2021dis1},
}
S. R. R. Rivu, Y. Abdrabou, Y. Abdelrahman, K. Pfeuffer, D. Kern, C. Neuert, D. Buschek, and F. Alt. Did you Understand this? Leveraging Gaze Behavior to Assess Questionnaire Comprehension. In Proceedings of the 2021 ACM Symposium on Eye Tracking Research & Applications (COGAIN ’21), ACM, New York, NY, USA, 2021.
[BibTeX] [Abstract] [PDF] [Talk]
Reading is one of the primary channels to gain information. Due to the growing amount of textual information we encounter, techniques are acquired to decide on the relevance of a text and how to grasp its content. We propose the usage of gaze behaviour as an assistive tool to assess the users’ reading comprehension. We investigate how problems in understanding text – specifically a word or a sentence – while filling in questionnaires are reflected in gaze behaviour. To identify text comprehension problems, while filling a questionnaire, and their correlation with the gaze features, we collected data from 42 participant. In a follow-up study (N=30), we evoked comprehension problems and features they affect and quantified users’ gaze behaviour. Our findings implies that comprehension problems could be reflected in a set of gaze features, namely, in the number of fixations, duration of fixations, and number of regressions. Our findings not only demonstrate the potential of eye tracking for assessing reading comprehension but also pave the way for researchers and designers to build novel questionnaire tools that instantly mitigate problems in reading comprehension.
@inproceedings{rivu2021cogain,
author = {Rivu, Sheikh Radiah Rahim and Abdrabou, Yasmeen and Abdelrahman, Yomna and Pfeuffer, Ken and Kern, Dagmar and Neuert, Cornelia and Buschek, Daniel and Alt, Florian},
title = {{Did you Understand this? Leveraging Gaze Behavior to Assess Questionnaire Comprehension}},
booktitle = {{Proceedings of the 2021 ACM Symposium on Eye Tracking Research \& Applications}},
series = {COGAIN '21},
year = {2021},
publisher = {ACM},
address = {New York, NY, USA},
location = {Stuttgart, Germany},
numpages = {5},
abstract = {Reading is one of the primary channels to gain information. Due to the growing amount of textual information we encounter, techniques are acquired to decide on the relevance of a text and how to grasp its content. We propose the usage of gaze behaviour as an assistive tool to assess the users’ reading comprehension. We investigate how problems in understanding text – specifically a word or a sentence – while filling in questionnaires are reflected in gaze behaviour. To identify text comprehension problems, while filling a questionnaire, and their correlation with the gaze features, we collected data from 42 participant. In a follow-up study (N=30), we evoked comprehension problems and features they affect and quantified users’ gaze behaviour. Our findings implies that comprehension problems could be reflected in a set of gaze features, namely, in the number of fixations, duration of fixations, and number of regressions. Our findings not only demonstrate the potential of eye tracking for assessing reading comprehension but also pave the way for researchers and designers to build novel questionnaire tools that instantly mitigate problems in reading comprehension.},
note = {rivu2021cogain},
talk = {rivu2021cogain},
timestamp = {2021.06.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/rivu2021cogain.pdf},
}
Y. Abdrabou, A. Shams, M. O. Mantawy, A. A. Khan, M. Khamis, F. Alt, and Y. Abdelrahman. GazeMeter: Exploring the Usage of Gaze Behaviour to enhance Password Assessments. In Proceedings of the 2021 ACM Symposium on Eye Tracking Research & Applications (ETRA ’21), ACM, New York, NY, USA, 2021.
[BibTeX] [Abstract] [PDF] [Video]
We investigate the use of gaze behaviour as a means to assess password strength. We contribute to the effort of making users choose passwords that are robust against guessing-attacks. While password policies and meters demonstrated the potential to increase password strength, these approaches provide important cues to attackers as well. Eye tracking enables a novel approach: by analysing people‘s gaze behaviour during password creation, its strength can be determined without revealing its properties. To demonstrate the feasibility of this approach, we present a proof of concept study (N=15) in which we let participants enter weak and strong passwords. Our findings reveal that it is possible to estimate password strength from gaze behaviour using Machine Learning techniques. In this way, we enable research on novel interfaces that motivate people to come-up with stronger passwords.
@InProceedings{abdrabou2021etra,
author = {Abdrabou, Yasmeen and Shams, Ahmed and Mantawy, Mohamed Omar and Khan, Anam Ahmad and Khamis, Mohamed and Alt, Florian and Abdelrahman, Yomna},
booktitle = {{Proceedings of the 2021 ACM Symposium on Eye Tracking Research \& Applications}},
title = {{GazeMeter: Exploring the Usage of Gaze Behaviour to enhance Password Assessments}},
year = {2021},
address = {New York, NY, USA},
note = {abdrabou2021etra},
publisher = {ACM},
series = {ETRA '21},
abstract = {We investigate the use of gaze behaviour as a means to assess password strength. We contribute to the effort of making users choose passwords that are robust against guessing-attacks. While password policies and meters demonstrated the potential to increase password strength, these approaches provide important cues to attackers as well. Eye tracking enables a novel approach: by analysing people‘s gaze behaviour during password creation, its strength can be determined without revealing its properties. To demonstrate the feasibility of this approach, we present a proof of concept study (N=15) in which we let participants enter weak and strong passwords. Our findings reveal that it is possible to estimate password strength from gaze behaviour using Machine Learning techniques. In this way, we enable research on novel interfaces that motivate people to come-up with stronger passwords.},
location = {Stuttgart, Germany},
numpages = {8},
owner = {florian},
timestamp = {2021.06.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/abdrabou2021etra.pdf},
video = {abdrabou2021etra},
}
S. Faltaous, A. Abdulmaksoud, M. Kempe, F. Alt, and S. Schneegass. GeniePutt: Augmenting human motor skills through electrical muscle stimulation. it – Information Technology, 2021. doi:10.1515/itit-2020-0035
[BibTeX] [PDF]
@Article{faltaous2021it,
author = {Sarah Faltaous and Aya Abdulmaksoud and Markus Kempe and Florian Alt and Stefan Schneegass},
journal = {it - Information Technology},
title = {{GeniePutt: Augmenting human motor skills through electrical muscle stimulation}},
year = {2021},
note = {faltaous2021it},
doi = {10.1515/itit-2020-0035},
timestamp = {2021.05.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/faltaous2021it.pdf},
}
A. Schmidt, F. Alt, and V. Mäkelä. Evaluation in human-computer interaction – beyond lab studies. In Extended Abstracts of the 2021 CHI Conference on Human Factors in Computing Systems (CHI EA ’21), Association for Computing Machinery, New York, NY, USA, 2021.
[BibTeX] [Abstract] [PDF]
Many research contributions in human-computer interaction are based on user studies in the lab. However, lab studies are not always possible, and they may come with significant challenges and limitations. In this course, we take a broader look at different approaches to doing research. We present a set of evaluation methods and research contributions that do not rely on user studies in labs. The discussion focuses on research approaches, data collection methods, and tools that can be conducted without direct interaction between the researchers and the participants.
@InProceedings{schmidt2021chiea,
author = {Schmidt, Albrecht and Alt, Florian and M\"{a}kel\"{a}, Ville},
booktitle = {Extended Abstracts of the 2021 CHI Conference on Human Factors in Computing Systems},
title = {Evaluation in Human-Computer Interaction – Beyond Lab Studies},
year = {2021},
address = {New York, NY, USA},
note = {schmidt2021chiea},
publisher = {Association for Computing Machinery},
abstract = {Many research contributions in human-computer interaction are based on user studies in the lab. However, lab studies are not always possible, and they may come with significant challenges and limitations. In this course, we take a broader look at different approaches to doing research. We present a set of evaluation methods and research contributions that do not rely on user studies in labs. The discussion focuses on research approaches, data collection methods, and tools that can be conducted without direct interaction between the researchers and the participants.},
articleno = {142},
doi = {10.1145/3411763.3445022},
isbn = {9781450380959},
numpages = {4},
timestamp = {2021.05.10},
url = {https://doi.org/10.1145/3411763.3445022},
}
R. Rivu, V. Mäkelä, S. Prange, S. D. Rodriguez, R. Piening, Y. Zhou, K. Köhle, K. Pfeuffer, Y. Abdelrahman, M. Hoppe, A. Schmidt, and F. Alt, Remote vr studies – a framework for running virtual reality studies remotely via participant-owned hmds, 2021.
[BibTeX] [PDF]
@misc{rivu2021arxiv,
author = {Rivu, Radiah and Mäkelä, Ville and Prange, Sarah and Rodriguez, Sarah Delgado and Piening, Robin and Zhou, Yumeng and Köhle, Kay and Pfeuffer, Ken and Abdelrahman, Yomna and Hoppe, Matthias and Schmidt, Albrecht and Alt, Florian},
title = {Remote VR Studies -- A Framework for Running Virtual Reality Studies Remotely Via Participant-Owned HMDs},
year = {2021},
eprint = {2102.11207},
archiveprefix = {arXiv},
primaryclass = {cs.HC},
note = {rivu2021arxiv},
timestamp = {2021.05.06},
url = {http://www.florian-alt.org/unibw/wp-content/publications/rivu2021arxiv.pdf},
}
F. Alt. Out of the Lab Research in Usable Security and Privacy. In Adjunct Proceedings of the 29th ACM Conference on User Modeling, Adaptation and Personalization (UMAP ’21), Association for Computing Machinery, New York, NY, USA, 2021. doi:10.1145/3450614.3464468
[BibTeX] [Abstract] [PDF]
The COVID pandemic made it challenging for usable security and privacy researchers around the globe to run experiments involving human subjects, specifically in cases where such experiments are conducted in controlled lab setting. Examples include but are not limited to (a) observing and collecting data on user behavior with the goal of (b) informing the design and (c) engineering novel concepts based on adaptation and personalization as well as (d) evaluating such concepts regarding user performance and robustness against different threat models. In this keynote I will set out with providing a brief introduction to and examples on our research on behavioral biometrics. I will then discuss how the current situation influences research requiring close work with human subjects in lab settings and outline approaches to address emerging issues. Finally, I will provide some examples of out-of-the-lab research and reflect on both challenges and opportunities of these approaches.
@InProceedings{alt2021apps,
author = {Florian Alt},
booktitle = {Adjunct Proceedings of the 29th ACM Conference on User Modeling, Adaptation and Personalization},
title = {{Out of the Lab Research in Usable Security and Privacy}},
year = {2021},
address = {New York, NY, USA},
note = {alt2021apps},
publisher = {Association for Computing Machinery},
series = {UMAP '21},
abstract = {The COVID pandemic made it challenging for usable security and privacy researchers around the globe to run experiments involving human subjects, specifically in cases where such experiments are conducted in controlled lab setting. Examples include but are not limited to (a) observing and collecting data on user behavior with the goal of (b) informing the design and (c) engineering novel concepts based on adaptation and personalization as well as (d) evaluating such concepts regarding user performance and robustness against different threat models. In this keynote I will set out with providing a brief introduction to and examples on our research on behavioral biometrics. I will then discuss how the current situation influences research requiring close work with human subjects in lab settings and outline approaches to address emerging issues. Finally, I will provide some examples of out-of-the-lab research and reflect on both challenges and opportunities of these approaches.},
doi = {10.1145/3450614.3464468},
timestamp = {2021.05.05},
url = {http://florian-alt.org/unibw/wp-content/publications/alt2021apps.pdf},
}
V. Mäkelä, J. Kleine, M. Hood, F. Alt, and A. Schmidt. Hidden Interaction Techniques: Concealed Information Acquisition and Texting on Smartphones and Wearables. In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems (CHI ’21), Association for Computing Machinery, New York, NY, USA, 2021. doi:10.1145/3313831.3376840
[BibTeX] [Abstract] [PDF] [Video]
There are many situations where using personal devices is not socially acceptable, or where nearby people present a privacy risk. For these situations, we developed two eyes-free interaction techniques that are difficult to notice: HiddenHaptics, that allows users to receive information through vibrotactile cues on a smartphone, and HideWrite, that allows users to write text messages by drawing on a dimmed smartwatch screen. We conducted three user studies to investigate whether, and how, these techniques can be used without being exposed. Our primary findings are (1) users can effectively hide their interactions while attending to a social situation, (2) users seek to interact when another person is speaking, and they also tend to hide the interaction using their body or furniture, and (3) users can sufficiently focus on the social situation despite their interaction, whereas non-users feel that observing the user hinders their ability to focus on the social activity.
@InProceedings{maekelae2021chi,
author = {M{\"a}kel{\"a}, Ville and Kleine, Johannes and Hood, Maxine and Alt, Florian and Schmidt, Albrecht},
booktitle = {{Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems}},
title = {{Hidden Interaction Techniques: Concealed Information Acquisition and Texting on Smartphones and Wearables}},
year = {2021},
address = {New York, NY, USA},
note = {maekelae2021chi},
publisher = {Association for Computing Machinery},
series = {CHI '21},
abstract = {There are many situations where using personal devices is not socially acceptable, or where nearby people present a privacy risk. For these situations, we developed two eyes-free interaction techniques that are difficult to notice: HiddenHaptics, that allows users to receive information through vibrotactile cues on a smartphone, and HideWrite, that allows users to write text messages by drawing on a dimmed smartwatch screen. We conducted three user studies to investigate whether, and how, these techniques can be used without being exposed. Our primary findings are (1) users can effectively hide their interactions while attending to a social situation, (2) users seek to interact when another person is speaking, and they also tend to hide the interaction using their body or furniture, and (3) users can sufficiently focus on the social situation despite their interaction, whereas non-users feel that observing the user hinders their ability to focus on the social activity.},
doi = {10.1145/3313831.3376840},
internal-note = {NOTE(review): DOI 10.1145/3313831.3376840 belongs to the CHI '20 proceedings and is repeated verbatim on several CHI '21 entries in this file -- verify the correct DOI},
isbn = {9781450367080},
location = {Yokohama, Japan},
timestamp = {2021.05.03},
url = {http://florian-alt.org/unibw/wp-content/publications/maekelae2021chi.pdf},
video = {maekelae2021chi},
}
S. Prange, A. Shams, R. Piening, Y. Abdelrahman, and F. Alt. PriView – Exploring Visualisations Supporting Users’ Privacy Awareness. In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems (CHI ’21), Association for Computing Machinery, New York, NY, USA, 2021. doi:10.1145/3313831.3376840
[BibTeX] [Abstract] [PDF] [Video]
There are many situations where using personal devices is not socially acceptable, or where nearby people present a privacy risk. For these situations, we developed two eyes-free interaction techniques that are difficult to notice: HiddenHaptics, that allows users to receive information through vibrotactile cues on a smartphone, and HideWrite, that allows users to write text messages by drawing on a dimmed smartwatch screen. We conducted three user studies to investigate whether, and how, these techniques can be used without being exposed. Our primary findings are (1) users can effectively hide their interactions while attending to a social situation, (2) users seek to interact when another person is speaking, and they also tend to hide the interaction using their body or furniture, and (3) users can sufficiently focus on the social situation despite their interaction, whereas non-users feel that observing the user hinders their ability to focus on the social activity.
@InProceedings{prange2021chi,
author = {Prange, Sarah and Shams, Ahmed and Piening, Robin and Abdelrahman, Yomna and Alt, Florian},
booktitle = {{Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems}},
title = {{PriView -- Exploring Visualisations Supporting Users' Privacy Awareness}},
year = {2021},
address = {New York, NY, USA},
note = {prange2021chi},
publisher = {Association for Computing Machinery},
series = {CHI '21},
abstract = {There are many situations where using personal devices is not socially acceptable, or where nearby people present a privacy risk. For these situations, we developed two eyes-free interaction techniques that are difficult to notice: HiddenHaptics, that allows users to receive information through vibrotactile cues on a smartphone, and HideWrite, that allows users to write text messages by drawing on a dimmed smartwatch screen. We conducted three user studies to investigate whether, and how, these techniques can be used without being exposed. Our primary findings are (1) users can effectively hide their interactions while attending to a social situation, (2) users seek to interact when another person is speaking, and they also tend to hide the interaction using their body or furniture, and (3) users can sufficiently focus on the social situation despite their interaction, whereas non-users feel that observing the user hinders their ability to focus on the social activity.},
doi = {10.1145/3313831.3376840},
internal-note = {NOTE(review): both the abstract and the DOI appear copied from maekelae2021chi (the abstract describes HiddenHaptics/HideWrite, not PriView) -- replace with this paper's own abstract and DOI},
isbn = {9781450367080},
location = {Yokohama, Japan},
timestamp = {2021.05.03},
url = {http://florian-alt.org/unibw/wp-content/publications/prange2021chi.pdf},
video = {prange2021chi},
}
L. Müller, K. Pfeuffer, J. Gugenheimer, S. Prange, B. Pfleging, and F. Alt. SpatialProto: Using Real-World Captures for Rapid Prototyping of Mixed Reality Experiences. In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems (CHI ’21), Association for Computing Machinery, New York, NY, USA, 2021. doi:10.1145/3313831.3376840
[BibTeX] [Abstract] [PDF]
Spatial computing systems that blend virtual and real worlds are increasingly becoming ubiquitous. However, creating experiences for spatial computing is difficult and requires skills in programming and 3D content creation, rendering them inaccessible to a wide user-group. We present SpatialProto, an in-situ spatial prototyping system that lowers the barrier for users to engage in spatial prototyping. With a depth-sensing capable Mixed Reality headset, SpatialProto lets users record animated objects of thereal-world environment (e.g. paper, clay, people or any other prop), extract only the relevant parts, and directly place and transform these recordings in their physical environment. We describe the design and implementation of SpatialProto, a user study evaluating the system’s prototype with non-expert users (n=9), and demonstrate applications where multiple captures are fused for compelling Mixed Reality experiences.
@InProceedings{mueller2021chi,
author = {M{\"u}ller, Leon and Pfeuffer, Ken and Gugenheimer, Jan and Prange, Sarah and Pfleging, Bastian and Alt, Florian},
booktitle = {{Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems}},
title = {{SpatialProto: Using Real-World Captures for Rapid Prototyping of Mixed Reality Experiences}},
year = {2021},
address = {New York, NY, USA},
note = {mueller2021chi},
publisher = {Association for Computing Machinery},
series = {CHI '21},
abstract = {Spatial computing systems that blend virtual and real worlds are increasingly becoming ubiquitous. However, creating experiences for spatial computing is difficult and requires skills in programming and 3D content creation, rendering them inaccessible to a wide user-group. We present SpatialProto, an in-situ spatial prototyping system that lowers the barrier for users to engage in spatial prototyping. With a depth-sensing capable Mixed Reality headset, SpatialProto lets users record animated objects of the real-world environment (e.g. paper, clay, people or any other prop), extract only the relevant parts, and directly place and transform these recordings in their physical environment. We describe the design and implementation of SpatialProto, a user study evaluating the system's prototype with non-expert users (n=9), and demonstrate applications where multiple captures are fused for compelling Mixed Reality experiences.},
doi = {10.1145/3313831.3376840},
internal-note = {NOTE(review): DOI 10.1145/3313831.3376840 belongs to the CHI '20 proceedings and is repeated verbatim on several CHI '21 entries in this file -- verify the correct DOI},
isbn = {9781450367080},
location = {Yokohama, Japan},
timestamp = {2021.05.03},
url = {http://florian-alt.org/unibw/wp-content/publications/mueller2021chi.pdf},
}
J. Liebers, U. Gruenefeld, L. Mecke, A. Saad, J. Auda, F. Alt, M. Abdelaziz, and S. Schneegass. Understanding User Identification in Virtual Reality through Behavioral Biometrics and the Effect of Body Normalization. In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems (CHI ’21), Association for Computing Machinery, New York, NY, USA, 2021. doi:10.1145/3313831.3376840
[BibTeX] [Abstract] [PDF] [Video]
Virtual Reality (VR) is becoming increasingly popular both in the entertainment and professional domains. Behavioral biometrics have recently been investigated as a means to continuously and implicitly identify users in VR. VR applications can specifically benefit from this, for example, to adapt the environment and user interface as well as to authenticate users. In this work, we conduct a lab study (N=16) to explore how accurately users can be identified during two task-driven scenarios based on their spatial movement. We show that an identification accuracy of up to 90% is possible across sessions recorded on different days. Moreover, we investigate the role of users’ physiology on behavioral biometrics. In particular, we virtually alter and normalize users’ body proportions to examine the influence on behavior. We find that body normalization in general increases the identification rate, in some cases by up to 38%, hence it improves the performance of identification systems.
@InProceedings{liebers2021chi,
author = {Liebers, Jonathan and Gruenefeld, Uwe and Mecke, Lukas and Saad, Alia and Auda, Jonas and Alt, Florian and Abdelaziz, Mark and Schneegass, Stefan},
booktitle = {{Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems}},
title = {{Understanding User Identification in Virtual Reality through Behavioral Biometrics and the Effect of Body Normalization}},
year = {2021},
address = {New York, NY, USA},
note = {liebers2021chi},
publisher = {Association for Computing Machinery},
series = {CHI '21},
abstract = {Virtual Reality (VR) is becoming increasingly popular both in the entertainment and professional domains. Behavioral biometrics have recently been investigated as a means to continuously and implicitly identify users in VR. VR applications can specifically benefit from this, for example, to adapt the environment and user interface as well as to authenticate users. In this work, we conduct a lab study (N=16) to explore how accurately users can be identified during two task-driven scenarios based on their spatial movement. We show that an identification accuracy of up to 90\% is possible across sessions recorded on different days. Moreover, we investigate the role of users' physiology on behavioral biometrics. In particular, we virtually alter and normalize users' body proportions to examine the influence on behavior. We find that body normalization in general increases the identification rate, in some cases by up to 38\%, hence it improves the performance of identification systems.},
doi = {10.1145/3313831.3376840},
internal-note = {NOTE(review): doi is identical to the mueller2021chi entry and uses the CHI 2020 prefix (3313831); CHI '21 papers normally use 10.1145/3411764.* -- verify against the ACM DL},
isbn = {9781450367080},
location = {Yokohama, Japan},
timestamp = {2021.05.03},
url = {http://florian-alt.org/unibw/wp-content/publications/liebers2021chi.pdf},
video = {liebers2021chi},
}
S. D. Rodriguez, S. Prange, L. Mecke, and F. Alt. ActPad – A Smart Desk Platform to Enable User Interaction with IoT Devices. In Extended Abstracts of the 2021 CHI Conference on Human Factors in Computing Systems (CHIEA ’21), ACM, New York, NY, USA, 2021.
[BibTeX] [PDF] [Video]
@InProceedings{delgado2021chiea,
author = {Delgado Rodriguez, Sarah and Prange, Sarah and Mecke, Lukas and Alt, Florian},
booktitle = {{Extended Abstracts of the 2021 CHI Conference on Human Factors in Computing Systems}},
title = {{ActPad -- A Smart Desk Platform to Enable User Interaction with IoT Devices}},
year = {2021},
address = {New York, NY, USA},
note = {delgado2021chiea},
publisher = {ACM},
series = {CHIEA '21},
location = {Yokohama, Japan},
numpages = {8},
timestamp = {2021.05.02},
url = {http://florian-alt.org/unibw/wp-content/publications/delgado2021chiea.pdf},
video = {delgado2021chiea},
}
Y. Abdrabou, Y. Abdelrahman, M. Khamis, and F. Alt. Think about it! Investigating the Effect of Password Strength on Cognitive Load during Password Creation. In Extended Abstracts of the 2021 CHI Conference on Human Factors in Computing Systems (CHIEA ’21), ACM, New York, NY, USA, 2021.
[BibTeX] [PDF] [Video]
@InProceedings{abdrabou2021chiea,
author = {Abdrabou, Yasmeen and Abdelrahman, Yomna and Khamis, Mohamed and Alt, Florian},
booktitle = {{Extended Abstracts of the 2021 CHI Conference on Human Factors in Computing Systems}},
title = {{Think about it! Investigating the Effect of Password Strength on Cognitive Load during Password Creation}},
year = {2021},
address = {New York, NY, USA},
note = {abdrabou2021chiea},
publisher = {ACM},
series = {CHIEA '21},
location = {Yokohama, Japan},
numpages = {8},
timestamp = {2021.05.02},
url = {http://florian-alt.org/unibw/wp-content/publications/abdrabou2021chiea.pdf},
video = {abdrabou2021chiea},
}
S. Prange, K. Marky, and F. Alt. Usable authentication in multi-device ecosystems. In Proceedings of the CHI 2021 Workshop on User Experience for Multi-Device Ecosystems: Challenges and Opportunities (UX4MDE ’21), 2021.
[BibTeX] [PDF]
@inproceedings{prange2021ux4mde,
  author    = {Prange, Sarah and Marky, Karola and Alt, Florian},
  title     = {Usable Authentication in Multi-Device Ecosystems},
  booktitle = {Proceedings of the CHI 2021 Workshop on User Experience for Multi-Device Ecosystems: Challenges and Opportunities},
  series    = {UX4MDE '21},
  year      = {2021},
  note      = {prange2021ux4mde},
  timestamp = {2021.04.30},
  url       = {http://florian-alt.org/unibw/wp-content/publications/prange2021ux4mde.pdf},
}
M. Khamis and F. Alt, “Technology-Augmented Perception and Cognition,” in Technology-augmented perception and cognition, T. Dingler and E. Niforatos, Eds., Cham: Springer International Publishing, 2021, p. 257–279. doi:10.1007/978-3-030-30457-7_8
[BibTeX] [Abstract] [PDF]
In this chapter, we present a privacy and security framework for designers of technologies that augment humans’ cognitive and perceptive capabilities. The framework consists of several groups of questions, meant to guide designers during the different stages of the design process. The objective of our work is to support the need for considering implications of novel technologies with regard to privacy and security early in the design process rather than post-hoc. The framework is based on a thorough review of the technologies presented earlier on in this book as well as of prior research in the field of technology augmentation. From this review, we derived several themes that are not only valuable pointers for future work but also serve as a basis for the subsequent framework. We point out the need to focus on the following aspects: data handling, awareness, user consent, and the design of the user interface.
@inbook{khamis2021springer,
  author    = {Khamis, Mohamed and Alt, Florian},
  title     = {{Technology-Augmented Perception and Cognition}},
  booktitle = {Technology-Augmented Perception and Cognition},
  chapter   = {Privacy and Security in Augmentation Technologies},
  editor    = {Dingler, Tilman and Niforatos, Evangelos},
  publisher = {Springer International Publishing},
  year      = {2021},
  pages     = {257--279},
  isbn      = {978-3-030-30457-7},
  doi       = {10.1007/978-3-030-30457-7_8},
  note      = {khamis2021springer},
  abstract  = {In this chapter, we present a privacy and security framework for designers of technologies that augment humans' cognitive and perceptive capabilities. The framework consists of several groups of questions, meant to guide designers during the different stages of the design process. The objective of our work is to support the need for considering implications of novel technologies with regard to privacy and security early in the design process rather than post-hoc. The framework is based on a thorough review of the technologies presented earlier on in this book as well as of prior research in the field of technology augmentation. From this review, we derived several themes that are not only valuable pointers for future work but also serve as a basis for the subsequent framework. We point out the need to focus on the following aspects: data handling, awareness, user consent, and the design of the user interface.},
  timestamp = {2021.01.15},
  url       = {http://florian-alt.org/unibw/wp-content/publications/khamis2021springer.pdf},
}
D. Buschek and F. Alt, “Building adaptive touch interfaces—case study 6,” in Intelligent computing for interactive system design: statistics, digital signal processing, and machine learning in practice, 1 ed., New York, NY, USA: Association for Computing Machinery, 2021, p. 379–406.
[BibTeX] [PDF]
@InBook{buschek2021intelligentcomputing,
author = {Buschek, Daniel and Alt, Florian},
pages = {379--406},
publisher = {Association for Computing Machinery},
title = {Building Adaptive Touch Interfaces—Case Study 6},
year = {2021},
address = {New York, NY, USA},
edition = {1},
isbn = {9781450390293},
note = {buschek2021intelligentcomputing},
booktitle = {Intelligent Computing for Interactive System Design: Statistics, Digital Signal Processing, and Machine Learning in Practice},
numpages = {28},
owner = {florian},
timestamp = {2021.01.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2021intelligentcomputing.pdf},
}
K. Pfeuffer, Y. Abdrabou, A. Esteves, R. Rivu, Y. Abdelrahman, S. Meitner, A. Saadi, and F. Alt. ARtention: A Design Space for Gaze-adaptive User Interfaces in Augmented Reality. Computers & graphics, 2021. doi:https://doi.org/10.1016/j.cag.2021.01.001
[BibTeX] [Abstract] [PDF]
Augmented Reality (AR) headsets extended with eye-tracking, a promising input technology for its natural and implicit nature, open a wide range of new interaction capabilities for everyday use. In this paper we present ARtention, a design space for gaze interaction specifically tailored for in-situ AR information interfaces. It highlights three important dimensions to consider in the UI design of such gaze-enabled applications: transitions from reality to the virtual interface, from single- to multi-layer content, and from information consumption to selection tasks. Such transitional aspects bring previously isolated gaze interaction concepts together to form a unified AR space, enabling more advanced application control seamlessly mediated by gaze. We describe these factors in detail. To illustrate how the design space can be used, we present three prototype applications and report informal user feedback obtained from different scenarios: a conversational UI, viewing a 3D visualization, and browsing items for shopping. We conclude with design considerations derived from our development and evaluation of the prototypes. We expect these to be valuable for researchers and designers investigating the use of gaze input in AR systems and applications.
@Article{pfeuffer2021cg,
author = {Pfeuffer, Ken and Abdrabou, Yasmeen and Esteves, Augusto and Rivu, Radiah and Abdelrahman, Yomna and Meitner, Stefanie and Saadi, Amr and Alt, Florian},
journal = {Computers \& Graphics},
title = {{ARtention: A Design Space for Gaze-adaptive User Interfaces in Augmented Reality}},
year = {2021},
issn = {0097-8493},
note = {pfeuffer2021cg},
abstract = {Augmented Reality (AR) headsets extended with eye-tracking, a promising input technology for its natural and implicit nature, open a wide range of new interaction capabilities for everyday use. In this paper we present ARtention, a design space for gaze interaction specifically tailored for in-situ AR information interfaces. It highlights three important dimensions to consider in the UI design of such gaze-enabled applications: transitions from reality to the virtual interface, from single- to multi-layer content, and from information consumption to selection tasks. Such transitional aspects bring previously isolated gaze interaction concepts together to form a unified AR space, enabling more advanced application control seamlessly mediated by gaze. We describe these factors in detail. To illustrate how the design space can be used, we present three prototype applications and report informal user feedback obtained from different scenarios: a conversational UI, viewing a 3D visualization, and browsing items for shopping. We conclude with design considerations derived from our development and evaluation of the prototypes. We expect these to be valuable for researchers and designers investigating the use of gaze input in AR systems and applications.},
doi = {10.1016/j.cag.2021.01.001},
keywords = {Augmented reality, AR, Mixed reality, MR, Gaze Interaction, Attention, Visualization, Design space},
timestamp = {2021.01.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/pfeuffer2021cg.pdf},
}

### 2020

A. Schmidt and F. Alt. Evaluation in human-computer interaction – beyond lab studies. Working document, 2020.
[BibTeX] [Abstract] [PDF]
In this paper we present a set of approaches to evaluation in human computer interaction that offer an alternative to lab studies. The discussion focuses on research approaches, data collection methods, and tools that can be conducted without direct interaction between the researchers and the participants.
@article{schmidt2020beyondlab,
  author    = {Schmidt, Albrecht and Alt, Florian},
  title     = {Evaluation in Human-Computer Interaction -- Beyond Lab Studies},
  journal   = {Working Document},
  year      = {2020},
  note      = {schmidt2020beyondlab},
  abstract  = {In this paper we present a set of approaches to evaluation in human computer interaction that offer an alternative to lab studies. The discussion focuses on research approaches, data collection methods, and tools that can be conducted without direct interaction between the researchers and the participants.},
  timestamp = {2021.05.07},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/schmidt2020beyondlab.pdf},
}
G. Graf, Y. Abdelrahman, H. Xu, Y. Abdrabou, D. Schitz, H. Hußmann, and F. Alt. The Predictive Corridor: A Virtual Augmented Driving Assistance System for Teleoperated Autonomous Vehicles. In International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments (ICAT-EGVE 2020), The Eurographics Association, 2020. doi:10.2312/egve.20201260
[BibTeX] [PDF]
@inproceedings{graf2020icat,
  author    = {Graf, Gaetano and Abdelrahman, Yomna and Xu, Hao and Abdrabou, Yasmeen and Schitz, Dmitrij and Hußmann, Heinrich and Alt, Florian},
  title     = {{The Predictive Corridor: A Virtual Augmented Driving Assistance System for Teleoperated Autonomous Vehicles}},
  booktitle = {{International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments}},
  series    = {ICAT-EGVE 2020},
  editor    = {Argelaguet, Ferran and McMahan, Ryan and Sugimoto, Maki},
  publisher = {The Eurographics Association},
  year      = {2020},
  doi       = {10.2312/egve.20201260},
  isbn      = {978-3-03868-111-3},
  issn      = {1727-530X},
  note      = {graf2020icat},
  timestamp = {2020.12.30},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/graf2020icat.pdf},
}
Y. Fanger, K. Pfeuffer, U. Helmbrecht, and F. Alt. Pianx – a platform for piano players to alleviate music performance anxiety using mixed reality. In 19th international conference on mobile and ubiquitous multimedia (MUM 2020), Association for Computing Machinery, New York, NY, USA, 2020, p. 267–276. doi:10.1145/3428361.3428394
[BibTeX] [Abstract] [PDF]
We present PIANX, a platform to assist piano players in alleviating Music Performance Anxiety (MPA). Our work is motivated by the ability of Virtual Reality (VR) to create environments closely resembling the real world. For musicians, settings such as auditions or concerts are of particular interest, since they allow practicing in situations which evoke stress as a result of stage fright. Current approaches are limited: while they provide a virtual scene, realistic haptic feedback (i.e. playing on a real piano) and an authentic representation of their hands is missing. We close this gap with the design of a Mixed Reality platform, consisting of a MIDI (Musical Instrument Digital Interface) stage piano and an HTC Vive Pro VR headset. The platform offers (a) two approaches to finger tracking and visualization – a virtual representation based on LeapMotion hand tracking (baseline) and a real representation using see-through VR; in addition, it provides (b) three different settings in which users can practice (home, audition, concert hall) and (c) a mechanism for real time feedback. We created a series of videos demonstrating the system and collected feedback from 23 participants in an online study, assessing their views towards our platform. Results reveal key insights for the design of virtual MPA training platforms from a scientific and consumer perspective.
@InProceedings{fanger2020mum,
author = {Fanger, Yara and Pfeuffer, Ken and Helmbrecht, Udo and Alt, Florian},
booktitle = {19th International Conference on Mobile and Ubiquitous Multimedia},
title = {{PIANX -- A Platform for Piano Players to Alleviate Music Performance Anxiety Using Mixed Reality}},
year = {2020},
address = {New York, NY, USA},
note = {fanger2020mum},
pages = {267--276},
publisher = {Association for Computing Machinery},
series = {MUM 2020},
abstract = {We present PIANX, a platform to assist piano players in alleviating Music Performance Anxiety (MPA). Our work is motivated by the ability of Virtual Reality (VR) to create environments closely resembling the real world. For musicians, settings such as auditions or concerts are of particular interest, since they allow practicing in situations which evoke stress as a result of stage fright. Current approaches are limited: while they provide a virtual scene, realistic haptic feedback (i.e. playing on a real piano) and an authentic representation of their hands is missing. We close this gap with the design of a Mixed Reality platform, consisting of a MIDI (Musical Instrument Digital Interface) stage piano and an HTC Vive Pro VR headset. The platform offers (a) two approaches to finger tracking and visualization -- a virtual representation based on LeapMotion hand tracking (baseline) and a real representation using see-through VR; in addition, it provides (b) three different settings in which users can practice (home, audition, concert hall) and (c) a mechanism for real time feedback. We created a series of videos demonstrating the system and collected feedback from 23 participants in an online study, assessing their views towards our platform. Results reveal key insights for the design of virtual MPA training platforms from a scientific and consumer perspective.},
doi = {10.1145/3428361.3428394},
isbn = {9781450388702},
keywords = {virtual reality, performance anxiety, music},
location = {Essen, Germany},
numpages = {10},
timestamp = {2020.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/fanger2020mum.pdf},
}
K. Marky, S. Prange, F. Krell, M. Mühlhäuser, and F. Alt. ‘You just can’t know about everything’: Privacy Perceptions of Smart Home Visitors. In Proceedings of the 19th International Conference on Mobile and Ubiquitous Multimedia (MUM’20), ACM, New York, NY, USA, 2020. doi:10.1145/3365610.3365626
[BibTeX] [Abstract] [PDF]
IoT devices can harvest personal information of any person in their surroundings and this includes data from visitors. Visitors often cannot protect their privacy in a foreign smart environment. This might be rooted in a poor awareness of privacy violations by IoT devices, a lack of knowledge, or a lack of coping strategies. Thus, visitors are typically unaware of being tracked by IoT devices or lack means to influence which data is collected about them. We interviewed 21 young adults to investigate which knowledge visitors of smart environments need and wish to be able and protect their privacy. We found that visitors consider their relation to the IoT device owner and familiarity with the environment and IoT devices when making decisions about data sharing that affect their privacy. Overall, the visitors of smart environments demonstrated similar privacy preferences like the owners of IoT devices but lacked means to judge consequences of data collection and means to express their privacy preferences. Based on our results, we discuss prerequisites for enabling visitor privacy in smart environments, demonstrate gaps in existing solutions and provide several methods to improve the awareness of smart environment visitors.
@InProceedings{marky2020mum,
author = {Marky, Karola and Prange, Sarah and Krell, Florian and Mühlhäuser, Max and Alt, Florian},
title = {{`You just can't know about everything': Privacy Perceptions of Smart Home Visitors}},
booktitle = {{Proceedings of the 19th International Conference on Mobile and Ubiquitous Multimedia}},
year = {2020},
series = {MUM'20},
address = {New York, NY, USA},
publisher = {ACM},
note = {marky2020mum},
abstract = {IoT devices can harvest personal information of any person in their surroundings and this includes data from \textit{visitors}. Visitors often cannot protect their privacy in a foreign smart environment. This might be rooted in a poor awareness of privacy violations by IoT devices, a lack of knowledge, or a lack of coping strategies. Thus, visitors are typically unaware of being tracked by IoT devices or lack means to influence which data is collected about them. We interviewed 21 young adults to investigate which knowledge visitors of smart environments need and wish to be able and protect their privacy. We found that visitors consider their relation to the IoT device owner and familiarity with the environment and IoT devices when making decisions about data sharing that affect their privacy. Overall, the visitors of smart environments demonstrated similar privacy preferences like the owners of IoT devices but lacked means to judge consequences of data collection and means to express their privacy preferences. Based on our results, we discuss prerequisites for enabling visitor privacy in smart environments, demonstrate gaps in existing solutions and provide several methods to improve the awareness of smart environment visitors.},
doi = {10.1145/3365610.3365626},
location = {Essen, Germany},
timestamp = {2020.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/marky2020mum.pdf},
}
M. Khamis and F. Alt, “Augmented Perception and Cognition,” , T. Dingler, Ed., Springer, 2020.
[BibTeX] [Abstract] [PDF]
In this chapter, we present a privacy and security framework for designers of technologies that augment humans’ cognitive and perceptive capabilities. The framework consists of several groups of questions, meant to guide designers during the different stages of the design process. The objective of our work is to support the need for considering implications of novel technologies with regard to privacy and security early in the design process rather than post-hoc. The framework is based on a thorough review of the technologies presented earlier on in this book as well as of prior research in the field of technology augmentation. From this review, we derived several themes that are not only valuable pointers for future work but also serve as a basis for the subsequent framework. We point out the need to focus on the following aspects: data handling, awareness, user consent, and the design of the user interface.
@inbook{khamis2020augmentation,
  author    = {Khamis, Mohamed and Alt, Florian},
  title     = {{Augmented Perception and Cognition}},
  chapter   = {Privacy and Security in Augmentation},
  editor    = {Dingler, Tilman},
  publisher = {Springer},
  year      = {2020},
  note      = {khamis2020augmentation},
  abstract  = {In this chapter, we present a privacy and security framework for designers of technologies that augment humans’ cognitive and perceptive capabilities. The framework consists of several groups of questions, meant to guide designers during the different stages of the design process. The objective of our work is to support the need for considering implications of novel technologies with regard to privacy and security early in the design process rather than post-hoc. The framework is based on a thorough review of the technologies presented earlier on in this book as well as of prior research in the field of technology augmentation. From this review, we derived several themes that are not only valuable pointers for future work but also serve as a basis for the subsequent framework. We point out the need to focus on the following aspects: data handling, awareness, user consent, and the design of the user interface.},
  owner     = {florian},
  timestamp = {2020.11.01},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2020augmentation.pdf},
}
D. Englmeier, J. O’Hagan, M. Zhang, F. Alt, A. Butz, T. Höllerer, and J. Williamson. Tangiblesphere – interaction techniques for physical and virtual spherical displays. In Proceedings of the 11th nordic conference on human-computer interaction: shaping experiences, shaping society (NordiCHI ’20), Association for Computing Machinery, New York, NY, USA, 2020. doi:10.1145/3419249.3420101
[BibTeX] [Abstract] [PDF] [Video]
Tangible interaction is generally assumed to provide benefits compared to other interaction styles due to its physicality. We demonstrate how this physicality can be brought to VR by means of TangibleSphere – a tracked, low-cost physical object that can (a) be rotated freely and (b) is overlaid with a virtual display. We present two studies, investigating performance in terms of efficiency and usability: the first study (N=16) compares TangibleSphere to a physical spherical display regarding accuracy and task completion time. We found comparable results for both types of displays. The second study (N=32) investigates the influence of physical rotation in more depth. We compare a pure VR condition to TangibleSphere in two conditions: one that allows actual physical rotation of the object and one that does not. Our findings show that physical rotation significantly improves accuracy and task completion time. These insights are valuable for researchers designing interaction techniques and interactive visualizations for spherical displays and for VR researchers aiming to incorporate physical touch into the experiences they design.
@InProceedings{englmeier2020nordichi,
author = {Englmeier, David and O'Hagan, Joseph and Zhang, Mengyi and Alt, Florian and Butz, Andreas and H{\"o}llerer, Tobias and Williamson, Julie},
booktitle = {Proceedings of the 11th Nordic Conference on Human-Computer Interaction: Shaping Experiences, Shaping Society},
title = {TangibleSphere -- Interaction Techniques for Physical and Virtual Spherical Displays},
year = {2020},
address = {New York, NY, USA},
note = {englmeier2020nordichi},
publisher = {Association for Computing Machinery},
series = {NordiCHI '20},
abstract = {Tangible interaction is generally assumed to provide benefits compared to other interaction styles due to its physicality. We demonstrate how this physicality can be brought to VR by means of TangibleSphere -- a tracked, low-cost physical object that can (a) be rotated freely and (b) is overlaid with a virtual display. We present two studies, investigating performance in terms of efficiency and usability: the first study (N=16) compares TangibleSphere to a physical spherical display regarding accuracy and task completion time. We found comparable results for both types of displays. The second study (N=32) investigates the influence of physical rotation in more depth. We compare a pure VR condition to TangibleSphere in two conditions: one that allows actual physical rotation of the object and one that does not. Our findings show that physical rotation significantly improves accuracy and task completion time. These insights are valuable for researchers designing interaction techniques and interactive visualizations for spherical displays and for VR researchers aiming to incorporate physical touch into the experiences they design.},
articleno = {75},
doi = {10.1145/3419249.3420101},
isbn = {9781450375795},
keywords = {tangible interaction, physicality, virtual reality, spherical displays, display simulation},
location = {Tallinn, Estonia},
numpages = {11},
timestamp = {2020.10.19},
url = {http://www.florian-alt.org/unibw/wp-content/publications/englmeier2020nordichi.pdf},
video = {englmeier2020nordichi},
}
M. Braun, J. Li, F. Weber, B. Pfleging, A. Butz, and F. Alt. What If Your Car Would Care? Exploring Use Cases For Affective Automotive User Interfaces. In 22nd international conference on human-computer interaction with mobile devices and services (MobileHCI ’20), Association for Computing Machinery, New York, NY, USA, 2020. doi:10.1145/3379503.3403530
[BibTeX] [Abstract] [PDF]
In this paper we present use cases for affective user interfaces (UIs) in cars and how they are perceived by potential users in China and Germany. Emotion-aware interaction is enabled by the improvement of ubiquitous sensing methods and provides potential benefits for both traffic safety and personal well-being. To promote the adoption of affective interaction at an international scale, we developed 20 mobile in-car use cases through an inter-cultural design approach and evaluated them with 65 drivers in Germany and China. Our data shows perceived benefits in specific areas of pragmatic quality as well as cultural differences, especially for socially interactive use cases. We also discuss general implications for future affective automotive UI. Our results provide a perspective on cultural peculiarities and a concrete starting point for practitioners and researchers working on emotion-aware interfaces.
@inproceedings{braun2020mobilehci,
  author    = {Braun, Michael and Li, Jingyi and Weber, Florian and Pfleging, Bastian and Butz, Andreas and Alt, Florian},
  title     = {{What If Your Car Would Care? Exploring Use Cases For Affective Automotive User Interfaces}},
  booktitle = {22nd International Conference on Human-Computer Interaction with Mobile Devices and Services},
  series    = {MobileHCI '20},
  year      = {2020},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  articleno = {37},
  numpages  = {12},
  doi       = {10.1145/3379503.3403530},
  isbn      = {9781450375160},
  location  = {Oldenburg, Germany},
  note      = {braun2020mobilehci},
  abstract  = {In this paper we present use cases for affective user interfaces (UIs) in cars and how they are perceived by potential users in China and Germany. Emotion-aware interaction is enabled by the improvement of ubiquitous sensing methods and provides potential benefits for both traffic safety and personal well-being. To promote the adoption of affective interaction at an international scale, we developed 20 mobile in-car use cases through an inter-cultural design approach and evaluated them with 65 drivers in Germany and China. Our data shows perceived benefits in specific areas of pragmatic quality as well as cultural differences, especially for socially interactive use cases. We also discuss general implications for future affective automotive UI. Our results provide a perspective on cultural peculiarities and a concrete starting point for practitioners and researchers working on emotion-aware interfaces.},
  keywords  = {Human-Computer Interaction, Affective Computing, Interaction Design, Automotive User Interfaces, Emotion Detection},
  timestamp = {2020.09.01},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/braun2020mobilehci.pdf},
}
M. Fröhlich, F. Gutjahr, and F. Alt. Don’t lose your coin! investigating security practices of cryptocurrency users. In Proceedings of the 2020 ACM Designing Interactive Systems Conference (DIS ’20), Association for Computing Machinery, New York, NY, USA, 2020, p. 1751–1763.
[BibTeX] [Abstract] [PDF]
In recent years, cryptocurrencies have increasingly gained interest. The underlying technology, Blockchain, shifts the responsibility for securing assets to the end-user and requires them to manage their (private) keys. Little attention has been given to how cryptocurrency users handle the challenges of key management in practice and how they select the tools to do so. To close this gap, we conducted semi-structured interviews (N=10). Our thematic analysis revealed prominent themes surrounding motivation, risk assessment, and coin management tool usage in practice. We found that the choice of tools is driven by how users assess and balance the key risks that can lead to loss: the risk of (1) human error, (2) betrayal, and (3) malicious attacks. We derive a model, explaining how risk assessment and intended usage drive the decision which tools to use. Our work is complemented by discussing design implications for building systems for the crypto economy.
@InProceedings{froehlich2020dis,
author = {Fr{\"o}hlich, Michael and Gutjahr, Felix and Alt, Florian},
booktitle = {Proceedings of the 2020 ACM Designing Interactive Systems Conference},
title = {Don't Lose Your Coin! Investigating Security Practices of Cryptocurrency Users},
year = {2020},
address = {New York, NY, USA},
note = {froehlich2020dis},
pages = {1751--1763},
publisher = {Association for Computing Machinery},
abstract = {In recent years, cryptocurrencies have increasingly gained interest. The underlying technology, Blockchain, shifts the responsibility for securing assets to the end-user and requires them to manage their (private) keys. Little attention has been given to how cryptocurrency users handle the challenges of key management in practice and how they select the tools to do so. To close this gap, we conducted semi-structured interviews (N=10). Our thematic analysis revealed prominent themes surrounding motivation, risk assessment, and coin management tool usage in practice. We found that the choice of tools is driven by how users assess and balance the key risks that can lead to loss: the risk of (1) human error, (2) betrayal, and (3) malicious attacks. We derive a model, explaining how risk assessment and intended usage drive the decision which tools to use. Our work is complemented by discussing design implications for building systems for the crypto economy.},
isbn = {9781450369749},
numpages = {13},
owner = {florian},
timestamp = {2020.06.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/froehlich2020dis.pdf},
}
V. Gentile, M. Khamis, F. Milazzo, S. Sorce, A. Malizia, and F. Alt. Predicting mid-air gestural interaction with public displays based on audience behaviour. International journal of human-computer studies, vol. 144, p. 102497, 2020. doi:https://doi.org/10.1016/j.ijhcs.2020.102497
[BibTeX] [Abstract] [PDF]
Knowledge about the expected interaction duration and expected distance from which users will interact with public displays can be useful in many ways. For example, knowing upfront that a certain setup will lead to shorter interactions can nudge space owners to alter the setup. If a system can predict that incoming users will interact at a long distance for a short amount of time, it can accordingly show shorter versions of content (e.g., videos/advertisements) and employ at-a-distance interaction modalities (e.g., mid-air gestures). In this work, we propose a method to build models for predicting users’ interaction duration and distance in public display environments, focusing on mid-air gestural interactive displays. First, we report our findings from a field study showing that multiple variables, such as audience size and behaviour, significantly influence interaction duration and distance. We then train predictor models using contextual data, based on the same variables. By applying our method to a mid-air gestural interactive public display deployment, we build a model that predicts interaction duration with an average error of about 8 s, and interaction distance with an average error of about 35 cm. We discuss how researchers and practitioners can use our work to build their own predictor models, and how they can use them to optimise their deployment.
@Article{gentile2020ijhcs,
author = {Vito Gentile and Mohamed Khamis and Fabrizio Milazzo and Salvatore Sorce and Alessio Malizia and Florian Alt},
journal = {International Journal of Human-Computer Studies},
title = {Predicting mid-air gestural interaction with public displays based on audience behaviour},
year = {2020},
issn = {1071-5819},
note = {gentile2020ijhcs},
pages = {102497},
volume = {144},
abstract = {Knowledge about the expected interaction duration and expected distance from which users will interact with public displays can be useful in many ways. For example, knowing upfront that a certain setup will lead to shorter interactions can nudge space owners to alter the setup. If a system can predict that incoming users will interact at a long distance for a short amount of time, it can accordingly show shorter versions of content (e.g., videos/advertisements) and employ at-a-distance interaction modalities (e.g., mid-air gestures). In this work, we propose a method to build models for predicting users’ interaction duration and distance in public display environments, focusing on mid-air gestural interactive displays. First, we report our findings from a field study showing that multiple variables, such as audience size and behaviour, significantly influence interaction duration and distance. We then train predictor models using contextual data, based on the same variables. By applying our method to a mid-air gestural interactive public display deployment, we build a model that predicts interaction duration with an average error of about 8 s, and interaction distance with an average error of about 35 cm. We discuss how researchers and practitioners can use our work to build their own predictor models, and how they can use them to optimise their deployment.},
doi = {10.1016/j.ijhcs.2020.102497},
keywords = {Pervasive displays, Users behaviour, Audience behaviour},
owner = {florian},
timestamp = {2020.06.20},
url = {http://www.florian-alt.org/unibw/wp-content/publications/gentile2020ijhcs.pdf},
}
Y. Abdrabou, K. Pfeuffer, M. Khamis, and F. Alt. GazeLockPatterns: Comparing Authentication Using Gaze and Touch for Entering Lock Patterns. In Proceedings of the 2020 ACM Symposium on Eye Tracking Research & Applications (ETRA ’20), ACM, New York, NY, USA, 2020. doi:10.1145/3379156.3391371
[BibTeX] [Abstract] [PDF] [Video]
In this work, we present a comparison between Android’s lock patterns for mobile devices (TouchLockPatterns) and an implementation of lock patterns that uses gaze input (GazeLockPatterns). We report on results of a between subjects study (N=40) to show that for the same layout of authentication interface, people employ comparable strategies for pattern composition. We discuss the pros and cons of adapting lock patterns to gaze-based user interfaces. We conclude by opportunities for future work, such as using data collected during authentication for calibrating eye trackers.
@InProceedings{abdrabou2020etra,
author = {Yasmeen Abdrabou and Ken Pfeuffer and Mohamed Khamis and Florian Alt},
booktitle = {{Proceedings of the 2020 ACM Symposium on Eye Tracking Research \& Applications}},
title = {{GazeLockPatterns: Comparing Authentication Using Gaze and Touch for Entering Lock Patterns}},
year = {2020},
address = {New York, NY, USA},
note = {abdrabou2020etra},
publisher = {ACM},
series = {ETRA '20},
abstract = {In this work, we present a comparison between Android's lock patterns for mobile devices (TouchLockPatterns) and an implementation of lock patterns that uses gaze input (GazeLockPatterns). We report on results of a between subjects study (N=40) to show that for the same layout of authentication interface, people employ comparable strategies for pattern composition. We discuss the pros and cons of adapting lock patterns to gaze-based user interfaces. We conclude by opportunities for future work, such as using data collected during authentication for calibrating eye trackers.},
doi = {10.1145/3379156.3391371},
isbn = {978-1-4503-7134-6},
keywords = {eye-tracking, calibration, eye-tracker, smooth pursuit, eye movement},
location = {Stuttgart, Germany},
numpages = {7},
owner = {florian},
timestamp = {2020.06.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/abdrabou2020etra.pdf},
video = {abdrabou2020etra},
}
R. Rivu, Y. Abdrabou, K. Pfeuffer, A. Esteves, S. Meitner, and F. Alt. Stare: gaze-assisted face-to-face communication in augmented reality. In Acm symposium on eye tracking research and applications (COGAIN ’20), Association for Computing Machinery, New York, NY, USA, 2020. doi:10.1145/3379157.3388930
[BibTeX] [Abstract] [PDF]
This research explores the use of eye-tracking during Augmented Reality (AR) – supported conversations. In this scenario, users can obtain information that supports the conversation, without augmentations distracting the actual conversation.We propose using gaze that allows users to gradually reveal information on demand. Information is indicated around user’s head, which becomes fully visible when other’s visual attention explicitly falls upon the area. We describe the design of such an AR UI and present an evaluation of the feasibility of the concept. Results show that despite gaze inaccuracies, users were positive about augmenting their conversations with contextual information and gaze interactivity. We provide insights into the trade-offs between focusing on the task at hand (i.e., the conversation), and consuming AR information. These findings are useful for future use cases of eye based AR interactions by contributing to a better understanding of the intricate balance between informative AR and information overload.
@InProceedings{rivu2020cogain,
author = {Rivu, Radiah and Abdrabou, Yasmeen and Pfeuffer, Ken and Esteves, Augusto and Meitner, Stefanie and Alt, Florian},
booktitle = {ACM Symposium on Eye Tracking Research and Applications},
title = {StARe: Gaze-Assisted Face-to-Face Communication in Augmented Reality},
year = {2020},
address = {New York, NY, USA},
note = {rivu2020cogain},
publisher = {Association for Computing Machinery},
series = {COGAIN '20},
abstract = {This research explores the use of eye-tracking during Augmented Reality (AR) - supported conversations. In this scenario, users can obtain information that supports the conversation, without augmentations distracting the actual conversation. We propose using gaze that allows users to gradually reveal information on demand. Information is indicated around user’s head, which becomes fully visible when other’s visual attention explicitly falls upon the area. We describe the design of such an AR UI and present an evaluation of the feasibility of the concept. Results show that despite gaze inaccuracies, users were positive about augmenting their conversations with contextual information and gaze interactivity. We provide insights into the trade-offs between focusing on the task at hand (i.e., the conversation), and consuming AR information. These findings are useful for future use cases of eye based AR interactions by contributing to a better understanding of the intricate balance between informative AR and information overload.},
articleno = {14},
doi = {10.1145/3379157.3388930},
isbn = {9781450371353},
keywords = {Assistive Conversation, Eye-tracking, AR, Gaze Interaction},
location = {Stuttgart, Germany},
numpages = {5},
timestamp = {2020.06.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/rivu2020cogain.pdf},
}
S. Prange, L. Mecke, A. Nguyen, M. Khamis, and F. Alt. Don’t Use Fingerprint, it’s Raining! How People Use and Perceive Context-Aware Selection of Mobile Authentication. In Proceedings of the international conference on advanced visual interfaces (AVI ’20), Association for Computing Machinery, New York, NY, USA, 2020. doi:10.1145/3399715.3399823
[BibTeX] [Abstract] [PDF]
This paper investigates how smartphone users perceive switching from their primary authentication mechanism to a fallback one, based on the context. This is useful in cases where the primary mechanism fails (e.g., wet fingers when using fingerprint). While prior work introduced the concept, we are the first to investigate its perception by users and their willingness to follow a system’s suggestion for a switch. We present findings from a two-week field study (N=29) using an Android app, showing that users are willing to adopt alternative mechanisms when prompted. We discuss how context-awareness can improve the perception of authentication reliability and potentially improve usability and security.
@inproceedings{prange2020avi,
author = {Sarah Prange and Lukas Mecke and Alice Nguyen and Mohamed Khamis and Florian Alt},
booktitle = {Proceedings of the International Conference on Advanced Visual Interfaces},
title = {{Don't Use Fingerprint, it's Raining! How People Use and Perceive Context-Aware Selection of Mobile Authentication}},
year = {2020},
series = {AVI '20},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
note = {prange2020avi},
abstract = {This paper investigates how smartphone users perceive switching from their primary authentication mechanism to a fallback one, based on the context. This is useful in cases where the primary mechanism fails (e.g., wet fingers when using fingerprint). While prior work introduced the concept, we are the first to investigate its perception by users and their willingness to follow a system's suggestion for a switch. We present findings from a two-week field study (N=29) using an Android app, showing that users are willing to adopt alternative mechanisms when prompted. We discuss how context-awareness can improve the perception of authentication reliability and potentially improve usability and security.},
articleno = {54},
doi = {10.1145/3399715.3399823},
isbn = {9781450375351},
keywords = {Android, Fingerprint, Field Study, Context-Aware Authentication, Biometrics, User Perception, Mobile Devices},
location = {Salerno, Italy},
numpages = {5},
timestamp = {2020.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/prange2020avi.pdf},
}
M. Rittenbruch, R. Schroeter, F. Wirth, and F. Alt. An exploratory physical computing toolkit for rapid exploration and co-design of on-bicycle notification interfaces. In Proceedings of the 2020 ACM Designing Interactive Systems Conference, Association for Computing Machinery, New York, NY, USA, 2020, p. 873–884.
[BibTeX] [Abstract] [PDF]
Cycling offers significant health and environmental benefits, but safety remains a critical issue. We need better tools and design processes to develop on-bicycle notification interfaces, for example, for hazard warnings, and to overcome design challenges associated with the cycling context. We present a physical computing toolkit that supports the rapid exploration and co-design of on-bicycle interfaces. Physical plug-and-play interaction modules controlled by an orchestration interface allow participants to explore different tangible and ambient interaction approaches on a budget cycling simulator. The toolkit was assessed by analysing video recordings of two group design workshops (N=8) and twelve individual design sessions (N=12). Our results show that the toolkit enabled flexible transitions between ideation and out-of-the-box thinking, prototyping, and immediate evaluation. We offer insights on how to design physical computing toolkits that offer low-cost, ‘good enough’ simulation while allowing for free and safe exploration of on-bicycle notification interfaces.
@InProceedings{rittenbruch2020dis,
author = {Rittenbruch, Markus and Schroeter, Ronald and Wirth, Florian and Alt, Florian},
booktitle = {Proceedings of the 2020 ACM Designing Interactive Systems Conference},
title = {An Exploratory Physical Computing Toolkit for Rapid Exploration and Co-Design of On-Bicycle Notification Interfaces},
year = {2020},
address = {New York, NY, USA},
note = {rittenbruch2020dis},
pages = {873--884},
publisher = {Association for Computing Machinery},
abstract = {Cycling offers significant health and environmental benefits, but safety remains a critical issue. We need better tools and design processes to develop on-bicycle notification interfaces, for example, for hazard warnings, and to overcome design challenges associated with the cycling context. We present a physical computing toolkit that supports the rapid exploration and co-design of on-bicycle interfaces. Physical plug-and-play interaction modules controlled by an orchestration interface allow participants to explore different tangible and ambient interaction approaches on a budget cycling simulator. The toolkit was assessed by analysing video recordings of two group design workshops (N=8) and twelve individual design sessions (N=12). Our results show that the toolkit enabled flexible transitions between ideation and out-of-the-box thinking, prototyping, and immediate evaluation. We offer insights on how to design physical computing toolkits that offer low-cost, 'good enough' simulation while allowing for free and safe exploration of on-bicycle notification interfaces.},
doi = {10.1145/3357236.3395534},
isbn = {9781450369749},
numpages = {12},
timestamp = {2020.06.01},
}
T. Kosch, M. Hassib, R. Reutter, and F. Alt. Emotions on the Go: Mobile Emotion Assessment in Real-Time Using Facial Expressions. In Proceedings of the international conference on advanced visual interfaces (AVI ’20), Association for Computing Machinery, New York, NY, USA, 2020. doi:10.1145/3399715.3399928
[BibTeX] [Abstract] [PDF]
Exploiting emotions for user interface evaluation became an increasingly important research objective in Human-Computer Interaction. Emotions are usually assessed through surveys that do not allow information to be collected in real-time. In our work, we suggest the use of smartphones for mobile emotion assessment. We use the front-facing smartphone camera as a tool for emotion detection based on facial expressions. Such information can be used to reflect on emotional states or provide emotion-aware user interface adaptation. We collected facial expressions along with app usage data in a two-week field study consisting of a one-week training phase and a one-week testing phase. We built and evaluated a person-dependent classifier, yielding an average classification improvement of 33% compared to classifying facial expressions only. Furthermore, we correlate the estimated emotions with concurrent app usage to draw insights into changes in mood. Our work is complemented by a discussion of the feasibility of probing emotions on-the-go and potential use cases for future emotion-aware applications.
@inproceedings{kosch2020avi,
author = {Kosch, Thomas and Hassib, Mariam and Reutter, Robin and Alt, Florian},
booktitle = {Proceedings of the International Conference on Advanced Visual Interfaces},
title = {{Emotions on the Go: Mobile Emotion Assessment in Real-Time Using Facial Expressions}},
year = {2020},
series = {AVI '20},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
note = {kosch2020avi},
abstract = {Exploiting emotions for user interface evaluation became an increasingly important research objective in Human-Computer Interaction. Emotions are usually assessed through surveys that do not allow information to be collected in real-time. In our work, we suggest the use of smartphones for mobile emotion assessment. We use the front-facing smartphone camera as a tool for emotion detection based on facial expressions. Such information can be used to reflect on emotional states or provide emotion-aware user interface adaptation. We collected facial expressions along with app usage data in a two-week field study consisting of a one-week training phase and a one-week testing phase. We built and evaluated a person-dependent classifier, yielding an average classification improvement of 33% compared to classifying facial expressions only. Furthermore, we correlate the estimated emotions with concurrent app usage to draw insights into changes in mood. Our work is complemented by a discussion of the feasibility of probing emotions on-the-go and potential use cases for future emotion-aware applications.},
articleno = {18},
doi = {10.1145/3399715.3399928},
isbn = {9781450375351},
keywords = {Affective Computing, Emotion Recognition, Emotion-Aware Interfaces, Mobile Sensing},
location = {Salerno, Italy},
numpages = {9},
timestamp = {2020.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/kosch2020avi.pdf},
}
V. Mäkelä, R. Radiah, S. Alsherif, M. Khamis, C. Xiao, L. Borchert, A. Schmidt, and F. Alt. Virtual Field Studies: Conducting Studies on Public Displays in Virtual Reality. In Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems, Association for Computing Machinery, New York, NY, USA, 2020. doi:10.1145/3313831.3376796
[BibTeX] [Abstract] [PDF] [Talk]
For the past 20 years, researchers have investigated the useof eye tracking in security applications. We present a holisticview on gaze-based security applications. In particular, we canvassedthe literature and classify the utility of gaze in securityapplications into a) authentication, b) privacy protection, andc) gaze monitoring during security critical tasks. This allowsus to chart several research directions, most importantly 1)conducting field studies of implicit and explicit gaze-basedauthentication due to recent advances in eye tracking, 2) researchon gaze-based privacy protection and gaze monitoringin security critical tasks which are under-investigated yet verypromising areas, and 3) understanding the privacy implicationsof pervasive eye tracking.We discuss the most promisingopportunities and most pressing challenges of eye trackingfor security that will shape research in gaze-based securityapplications for the next decade.
@InProceedings{makela2020chi,
author = {Ville Mäkelä and Rivu Radiah and Saleh Alsherif and Mohamed Khamis and Chong Xiao and Lisa Borchert and Albrecht Schmidt and Florian Alt},
booktitle = {{Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems}},
title = {{Virtual Field Studies: Conducting Studies on Public Displays in Virtual Reality}},
year = {2020},
address = {New York, NY, USA},
note = {makela2020chi},
publisher = {Association for Computing Machinery},
internal-note = {NOTE(review): the abstract below is identical to that of katsini2020chi (eye gaze in security/privacy) and does not match this paper's topic -- replace with the correct abstract},
abstract = {For the past 20 years, researchers have investigated the useof eye tracking in security applications. We present a holisticview on gaze-based security applications. In particular, we canvassedthe literature and classify the utility of gaze in securityapplications into a) authentication, b) privacy protection, andc) gaze monitoring during security critical tasks. This allowsus to chart several research directions, most importantly 1)conducting field studies of implicit and explicit gaze-basedauthentication due to recent advances in eye tracking, 2) researchon gaze-based privacy protection and gaze monitoringin security critical tasks which are under-investigated yet verypromising areas, and 3) understanding the privacy implicationsof pervasive eye tracking.We discuss the most promisingopportunities and most pressing challenges of eye trackingfor security that will shape research in gaze-based securityapplications for the next decade.},
doi = {10.1145/3313831.3376796},
isbn = {9781450367080},
keywords = {Virtual reality, field studies, public displays, research methods},
location = {Honolulu, HI, US},
timestamp = {2020.05.03},
url = {http://florian-alt.org/unibw/wp-content/publications/makela2020chi.pdf},
}
C. Katsini, Y. Abdrabou, G. E. Raptidis, M. Khamis, and F. Alt. The Role of Eye Gaze in Security and Privacy Applications: Survey and Future HCI Research Directions. In Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems (CHI ’20), Association for Computing Machinery, New York, NY, USA, 2020. doi:10.1145/3313831.3376840
[BibTeX] [Abstract] [PDF] [Video] [Talk]
For the past 20 years, researchers have investigated the useof eye tracking in security applications. We present a holisticview on gaze-based security applications. In particular, we canvassedthe literature and classify the utility of gaze in securityapplications into a) authentication, b) privacy protection, andc) gaze monitoring during security critical tasks. This allowsus to chart several research directions, most importantly 1)conducting field studies of implicit and explicit gaze-basedauthentication due to recent advances in eye tracking, 2) researchon gaze-based privacy protection and gaze monitoringin security critical tasks which are under-investigated yet verypromising areas, and 3) understanding the privacy implicationsof pervasive eye tracking.We discuss the most promisingopportunities and most pressing challenges of eye trackingfor security that will shape research in gaze-based securityapplications for the next decade.
@InProceedings{katsini2020chi,
author = {Christina Katsini and Yasmeen Abdrabou and George E. Raptidis and Mohamed Khamis and Florian Alt},
booktitle = {{Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems}},
title = {{The Role of Eye Gaze in Security and Privacy Applications: Survey and Future HCI Research Directions}},
year = {2020},
address = {New York, NY, USA},
note = {katsini2020chi},
publisher = {Association for Computing Machinery},
series = {CHI '20},
internal-note = {NOTE(review): verify author surname "Raptidis" -- possibly "Raptis"},
abstract = {For the past 20 years, researchers have investigated the useof eye tracking in security applications. We present a holisticview on gaze-based security applications. In particular, we canvassedthe literature and classify the utility of gaze in securityapplications into a) authentication, b) privacy protection, andc) gaze monitoring during security critical tasks. This allowsus to chart several research directions, most importantly 1)conducting field studies of implicit and explicit gaze-basedauthentication due to recent advances in eye tracking, 2) researchon gaze-based privacy protection and gaze monitoringin security critical tasks which are under-investigated yet verypromising areas, and 3) understanding the privacy implicationsof pervasive eye tracking.We discuss the most promisingopportunities and most pressing challenges of eye trackingfor security that will shape research in gaze-based securityapplications for the next decade.},
doi = {10.1145/3313831.3376840},
isbn = {9781450367080},
keywords = {Eye tracking, Gaze Interaction, Security, Privacy, Survey},
location = {Honolulu, HI, US},
timestamp = {2020.05.03},
url = {http://florian-alt.org/unibw/wp-content/publications/katsini2020chi.pdf},
video = {katsini2020chi},
}
R. Rivu, Y. Abdrabou, K. Pfeuffer, M. Hassib, and F. Alt. Gaze’N’Touch: Enhancing Text Selection on Mobile Devices Using Gaze. In Extended abstracts of the 2020 chi conference on human factors in computing systems (CHI EA ’20), Association for Computing Machinery, New York, NY, USA, 2020, p. 1–8. doi:10.1145/3334480.3382802
[BibTeX] [Abstract] [PDF]
Text selection is a frequent task we do everyday to edit, modify or delete text. Selecting a word requires not only precision but also switching between selections and typing which influences both speed and error rates. In this paper, we evaluate a novel concept that extends text editing with an additional modality, that is gaze. We present a user study (N=16) where we explore how, the novel concepts called GazeButton can improve text selection by comparing it to touch based selection. In addition, we tested the effect of text size on the selection techniques by comparing two different text sizes.Results show that gaze based selection was faster with bigger text size, although not statistically significant. Qualitative feedback show a preference on gaze over touch which motivates a new direction of gaze usage in text editors.
@InProceedings{rivu2020chiea,
author = {Radiah Rivu and Yasmeen Abdrabou and Ken Pfeuffer and Mariam Hassib and Florian Alt},
booktitle = {Extended Abstracts of the 2020 CHI Conference on Human Factors in Computing Systems},
title = {{Gaze'N'Touch: Enhancing Text Selection on Mobile Devices Using Gaze}},
year = {2020},
address = {New York, NY, USA},
note = {rivu2020chiea},
pages = {1--8},
publisher = {Association for Computing Machinery},
series = {CHI EA '20},
abstract = {Text selection is a frequent task we do everyday to edit, modify or delete text. Selecting a word requires not only precision but also switching between selections and typing which influences both speed and error rates. In this paper, we evaluate a novel concept that extends text editing with an additional modality, that is gaze. We present a user study (N=16) where we explore how, the novel concepts called GazeButton can improve text selection by comparing it to touch based selection. In addition, we tested the effect of text size on the selection techniques by comparing two different text sizes.Results show that gaze based selection was faster with bigger text size, although not statistically significant. Qualitative feedback show a preference on gaze over touch which motivates a new direction of gaze usage in text editors.},
doi = {10.1145/3334480.3382802},
isbn = {9781450368193},
keywords = {gaze and touch, interaction, text editing, gaze selection},
location = {Honolulu, HI, USA},
numpages = {8},
timestamp = {2020.05.02},
url = {http://florian-alt.org/unibw/wp-content/publications/rivu2020chiea.pdf},
}
S. Prange and F. Alt. I Wish You Were Smart(er): Investigating Users’ Desires and Needs Towards Home Appliances. In Extended abstracts of the 2020 chi conference on human factors in computing systems (CHI EA ’20), Association for Computing Machinery, New York, NY, USA, 2020, p. 1–8. doi:10.1145/3334480.3382910
[BibTeX] [Abstract] [PDF]
In this work, we present findings from an online survey (N=77) in which we assessed situations of users wishing for features or devices in their home to be smart(er). Our work is motivated by the fact that on one hand, several successful smart devices and features found their way into users’ homes (e.g., smart TVs, smart assistants, smart toothbrushes). On the other hand, a more holistic understanding of when and why users would like devices and features to be smart is missing as of today. Such knowledge is valuable for researchers and practitioners to inform the design of future smart home devices and features, in particular with regards to interaction techniques, privacy mechanisms, and, ultimately, acceptance and uptake. We found that users would appreciate smart features for various use cases, including remote control and multi-tasking, and are willing to share devices. We believe our work to be useful for designers and HCI researchers by supporting the design and evaluation of future smart devices.
@InProceedings{prange2020chiea,
author = {Prange, Sarah and Alt, Florian},
booktitle = {Extended Abstracts of the 2020 CHI Conference on Human Factors in Computing Systems},
title = {{I Wish You Were Smart(er): Investigating Users' Desires and Needs Towards Home Appliances}},
year = {2020},
address = {New York, NY, USA},
note = {prange2020chiea},
pages = {1--8},
publisher = {Association for Computing Machinery},
series = {CHI EA '20},
abstract = {In this work, we present findings from an online survey (N=77) in which we assessed situations of users wishing for features or devices in their home to be smart(er). Our work is motivated by the fact that on one hand, several successful smart devices and features found their way into users' homes (e.g., smart TVs, smart assistants, smart toothbrushes). On the other hand, a more holistic understanding of when and why users would like devices and features to be smart is missing as of today. Such knowledge is valuable for researchers and practitioners to inform the design of future smart home devices and features, in particular with regards to interaction techniques, privacy mechanisms, and, ultimately, acceptance and uptake. We found that users would appreciate smart features for various use cases, including remote control and multi-tasking, and are willing to share devices. We believe our work to be useful for designers and HCI researchers by supporting the design and evaluation of future smart devices.},
doi = {10.1145/3334480.3382910},
isbn = {9781450368193},
keywords = {smart devices, smart homes, online survey},
location = {Honolulu, HI, USA},
numpages = {8},
timestamp = {2020.05.02},
url = {http://florian-alt.org/unibw/wp-content/publications/prange2020chiea.pdf},
}
A. Saad, S. D. Rodriguez, R. Heger, F. Alt, and S. Schneegass. Understanding User-Centered Attacks In-The-Wild. In Proceedings of the 1st CHI Workshop on Authentication Beyond Desktops and Smartphones (WABDS’20), 2020.
[BibTeX] [PDF]
@InProceedings{saad2020wabds,
author = {Alia Saad AND Sarah Delgado Rodriguez AND Roman Heger AND Florian Alt AND Stefan Schneegass},
booktitle = {{Proceedings of the 1st CHI Workshop on Authentication Beyond Desktops and Smartphones}},
title = {{Understanding User-Centered Attacks In-The-Wild}},
year = {2020},
note = {saad2020wabds},
series = {WABDS'20},
timestamp = {2020.04.27},
url = {http://florian-alt.org/unibw/wp-content/publications/abdrabou2020wabds.pdf},
internal-note = {NOTE(review): note field corrected from abdrabou2020wabds to match the entry key; url still points to abdrabou2020wabds.pdf while file convention is key.pdf -- verify the actual PDF filename},
}
R. Radiah, V. Maekelae, M. Hassib, and F. Alt. Understanding Emotions in Virtual Reality. In Proceedings of the 1st CHI Workshop on Momentary Emotion Elicitation and Capture (MEEC’20), 2020.
[BibTeX] [PDF]
@InProceedings{rivu2020meec,
author = {Rivu Radiah and Ville Mäkelä and Mariam Hassib and Florian Alt},
booktitle = {{Proceedings of the 1st CHI Workshop on Momentary Emotion Elicitation and Capture}},
title = {{Understanding Emotions in Virtual Reality}},
year = {2020},
note = {rivu2020meec},
series = {MEEC'20},
timestamp = {2020.04.27},
url = {http://florian-alt.org/unibw/wp-content/publications/rivu2020meec.pdf},
internal-note = {NOTE(review): author "Ville Maekelae" normalized to the UTF-8 spelling "Mäkelä" used by the other entries in this file},
}
Y. Abdrabou, S. Prange, L. Mecke, K. Pfeuffer, and F. Alt. VolumePatterns: Using Hardware Buttons beyond Volume Control on Mobile Devices. In Proceedings of the 1st CHI Workshop on Authentication Beyond Desktops and Smartphones (WABDS’20), 2020.
[BibTeX] [PDF]
@InProceedings{abdrabou2020wabds,
author = {Yasmeen Abdrabou AND Sarah Prange AND Lukas Mecke AND Ken Pfeuffer AND Florian Alt},
booktitle = {{Proceedings of the 1st CHI Workshop on Authentication Beyond Desktops and Smartphones}},
title = {{VolumePatterns: Using Hardware Buttons beyond Volume Control on Mobile Devices}},
year = {2020},
note = {abdrabou2020wabds},
series = {WABDS'20},
timestamp = {2020.04.27},
url = {http://florian-alt.org/unibw/wp-content/publications/abdrabou2020wabds.pdf},
internal-note = {NOTE(review): note and url added per file convention (note = entry key; url = key.pdf; the rendered citation advertises a PDF) -- verify the PDF location},
}
S. Prange and F. Alt. Interact2Authenticate: Towards Usable Authentication in Smart Environments. In Proceedings of the 1st CHI Workshop on Authentication Beyond Desktops and Smartphones (WABDS’20), 2020.
[BibTeX] [PDF]
@inproceedings{prange2020wabds,
  author    = {Sarah Prange AND Florian Alt},
  title     = {{Interact2Authenticate: Towards Usable Authentication in Smart Environments}},
  booktitle = {{Proceedings of the 1st CHI Workshop on Authentication Beyond Desktops and Smartphones}},
  series    = {WABDS'20},
  year      = {2020},
  note      = {prange2020wabds},
  timestamp = {2020.04.27},
  url       = {http://florian-alt.org/unibw/wp-content/publications/prange2020wabds.pdf},
}
A. Colley, B. Pfleging, F. Alt, and J. Häkkilä. Exploring Public Wearable Display of Wellness Tracker Data. International Journal of Human-Computer Studies, 2020. doi:https://doi.org/10.1016/j.ijhcs.2020.102408
[BibTeX] [Abstract] [PDF]
We investigate wearable presentation of tracked wellness data, and people’s perceptions and motivations for sharing it through a wearable display. Whilst online sharing is a common feature in wellness tracking solutions, the motivations and experiences of users to share tracked data in situ has not been widely studied. We created two functional prototypes – the hat tracker and the tracker badge – which we used as probes in two focus groups to elicit opinions on the content and format of wearable tracker displays. Complementing this, a study where participants used the hat tracker prototype in public locations provides insights on sharing in everyday life use contexts. We report that users appreciate the motivating nature of such displays, but favor the display of positive information. Leveraging prior work, we present a model describing the factors affecting users’ willingness to share tracked data via wearable displays, and highlight such displays’ potential for supporting behavior change.
@Article{colley2020ijhcs,
author = {Ashley Colley and Bastian Pfleging and Florian Alt and Jonna Häkkilä},
journal = {{International Journal of Human-Computer Studies}},
title = {{Exploring Public Wearable Display of Wellness Tracker Data}},
year = {2020},
issn = {1071-5819},
month = jan,
note = {colley2020ijhcs},
abstract = {We investigate wearable presentation of tracked wellness data, and people’s perceptions and motivations for sharing it through a wearable display. Whilst online sharing is a common feature in wellness tracking solutions, the motivations and experiences of users to share tracked data in situ has not been widely studied. We created two functional prototypes – the hat tracker and the tracker badge – which we used as probes in two focus groups to elicit opinions on the content and format of wearable tracker displays. Complementing this, a study where participants used the hat tracker prototype in public locations provides insights on sharing in everyday life use contexts. We report that users appreciate the motivating nature of such displays, but favor the display of positive information. Leveraging prior work, we present a model describing the factors affecting users’ willingness to share tracked data via wearable displays, and highlight such displays’ potential for supporting behavior change.},
doi = {10.1016/j.ijhcs.2020.102408},
timestamp = {2020.01.29},
url = {http://florian-alt.org/unibw/wp-content/publications/colley2020ijhcs.pdf},
}
M. Braun and F. Alt, “Character Computing,” in Character computing, 1 ed., A. E. Bolock, Y. Abdelrahman, and S. Abdennadher, Eds., Springer International Publishing, 2020, p. 15. doi:10.1007/978-3-030-15954-2
[BibTeX] [PDF]
@InBook{braun2020springer,
author = {Michael Braun AND Florian Alt},
editor = {Alia El Bolock AND Yomna Abdelrahman AND Slim Abdennadher},
chapter = {{Identifying Personality Dimensions for Digital Agents}},
pages = {15},
publisher = {Springer International Publishing},
title = {{Character Computing}},
year = {2020},
edition = {1},
isbn = {978-3-030-15954-2},
note = {braun2020springer},
series = {Human-Computer Interaction Series},
booktitle = {Character Computing},
doi = {10.1007/978-3-030-15954-2},
owner = {florian},
timestamp = {2020.01.28},
url = {http://florian-alt.org/unibw/wp-content/publications/braun2020springer.pdf},
internal-note = {NOTE(review): inserted missing space in chapter ("DigitalAgents"); title and chapter may be swapped -- the rendered citation shows "Character Computing" as the cited chapter -- verify against the book},
}

### 2019

F. Alt and E. von Zezschwitz. Emerging Trends in Usable Security and Privacy. Journal of Interactive Media (icom), vol. 18, iss. 3, 2019. doi:10.1515/icom-2019-0019
[BibTeX] [PDF]
@article{alt2019icom,
  author    = {Florian Alt AND Emanuel von Zezschwitz},
  title     = {{Emerging Trends in Usable Security and Privacy}},
  journal   = {{Journal of Interactive Media (icom)}},
  volume    = {18},
  number    = {3},
  month     = dec,
  year      = {2019},
  doi       = {10.1515/icom-2019-0019},
  note      = {alt2019icom},
  owner     = {florian},
  timestamp = {2019.12.31},
  url       = {http://florian-alt.org/unibw/wp-content/publications/alt2019icom.pdf},
}
S. Faltaous, J. Liebers, Y. Abdelrahman, F. Alt, and S. Schneegass. VPID: Towards Vein Pattern Identification Using Thermal Imaging. Journal of Interactive Media (icom), vol. 18, iss. 3, 2019. doi:10.1515/icom-2019-0019
[BibTeX] [PDF]
@Article{faltaous2019icom,
author = {Sarah Faltaous AND Jonathan Liebers AND Yomna Abdelrahman AND Florian Alt AND Stefan Schneegass},
journal = {{Journal of Interactive Media (icom)}},
title = {{VPID: Towards Vein Pattern Identification Using Thermal Imaging}},
year = {2019},
issn = {1618-162X},
month = dec,
note = {faltaous2019icom},
number = {3},
volume = {18},
doi = {10.1515/icom-2019-0019},
owner = {florian},
timestamp = {2019.12.31},
url = {http://florian-alt.org/unibw/wp-content/publications/faltaous2019icom.pdf},
internal-note = {NOTE(review): doi is identical to the alt2019icom editorial of the same issue -- likely a copy-paste error; verify this article's own DOI before use},
}
F. Alt and E. von Zezschwitz. Special Issue: Emerging Trends in Usable Security and Privacy. Journal of interactive media (icom), vol. 18, iss. 3, 2019.
[BibTeX] [PDF]
@Periodical{alt2019icomsi,
title = {{Special Issue: Emerging Trends in Usable Security and Privacy}},
year = {2019},
editor = {Florian Alt AND Emanuel von Zezschwitz},
language = {German},
series = {Journal of Interactive Media},
volume = {18},
number = {3},
organization = {De Gruyter},
month = dec,
note = {alt2019icomsi},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2019icomsi.pdf},
author = {Florian Alt AND Emanuel von Zezschwitz},
journal = {Journal of Interactive Media (icom)},
owner = {florian},
timestamp = {2019.12.30},
internal-note = {NOTE(review): author duplicates editor and journal duplicates series for this @Periodical; language = German conflicts with the English title -- verify which fields this non-standard entry type actually needs},
}
Y. Abdelrahman, P. Woźniak, P. Knierim, D. Weber, K. Pfeuffer, N. Henze, A. Schmidt, and F. Alt. Exploring the Domestication of Thermal Imaging. In Proceedings of the 18th International Conference on Mobile and Ubiquitous Multimedia (MUM’19), ACM, New York, NY, USA, 2019. doi:10.1145/3365610.336564
[BibTeX] [Abstract] [PDF]
Recent work demonstrated the opportunities of thermal imaging in the development of novel interactive systems. However, the exploration is limited to controlled lab setups. Hence, little we know about how thermal imaging could be useful for a broader range of daily applications by novice users. To investigate the potential of domestication of thermal imaging, we conducted an exploration with a technology-cultural probe. Ten households (26 individuals) used a mobile thermal camera in their daily life. We collected thermal photos taken by the participants and conducted interviews after using the camera. We found that the users were excited about using thermal cameras in their everyday lives and found many practical uses for them. Our study provides insights into how novice users wish to use thermal imaging technology to augment their vision in daily setups, as well as identifying and classifying common thermal imaging use cases. Our work contributes implications for designing thermal imaging devices targeted towards novice users.
@InProceedings{abdelrahman2019mum,
author = {Yomna Abdelrahman and Paweł Woźniak and Pascal Knierim and Dominik Weber and Ken Pfeuffer and Niels Henze and Albrecht Schmidt and Florian Alt},
booktitle = {{Proceedings of the 18th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{Exploring the Domestication of Thermal Imaging}},
year = {2019},
address = {New York, NY, USA},
note = {abdelrahman2019mum},
publisher = {ACM},
series = {MUM'19},
abstract = {Recent work demonstrated the opportunities of thermal imaging in the development of novel interactive systems. However, the exploration is limited to controlled lab setups. Hence, little we know about how thermal imaging could be useful for a broader range of daily applications by novice users. To investigate the potential of domestication of thermal imaging, we conducted an exploration with a technology-cultural probe. Ten households (26 individuals) used a mobile thermal camera in their daily life. We collected thermal photos taken by the participants and conducted interviews after using the camera. We found that the users were excited about using thermal cameras in their everyday lives and found many practical uses for them. Our study provides insights into how novice users wish to use thermal imaging technology to augment their vision in daily setups, as well as identifying and classifying common thermal imaging use cases. Our work contributes implications for designing thermal imaging devices targeted towards novice users.},
doi = {10.1145/3365610.336564},
location = {Pisa, Italy},
timestamp = {2019.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/abdelrahman2019mum.pdf},
internal-note = {NOTE(review): abstract repaired from PDF-extraction run-ons; doi suffix 336564 has only six digits while sibling MUM'19 DOIs have seven (3365626, 3365628) -- looks truncated, verify},
}
S. Prange, L. Mecke, M. Stadler, M. Balluff, M. Khamis, and F. Alt. Securing Personal Items in Public Space – Stories of Attacks and Threats. In Proceedings of the 18th International Conference on Mobile and Ubiquitous Multimedia (MUM’19), ACM, New York, NY, USA, 2019. doi:10.1145/3365610.3365628
[BibTeX] [Abstract] [PDF]
While we put great effort in protecting digital devices and data, there is a lack of research on usable techniques to secure personal items that we carry in public space. To better understand situations where ubiquitous technologies could help secure personal items, we conducted an online survey (N=101) in which we collected real-world stories from users reporting on personal items, either at risk of, or actually being lost, damaged or stolen. We found that the majority of cases occurred in (semi-)public spaces during afternoon and evening times, when users left their items. From these results, we derived a model of incidents involving personal items in public space as well as a set of properties to describe situations where personal items may be at risk. We discuss reoccurring properties of the scenarios, potential multimedia-based protection mechanisms for securing personal items in public space as well as future research suggestions.
@InProceedings{prange2019mum,
author = {Sarah Prange and Lukas Mecke and Michael Stadler and Maximilian Balluff and Mohamed Khamis and Florian Alt},
booktitle = {{Proceedings of the 18th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{Securing Personal Items in Public Space -- Stories of Attacks and Threats}},
year = {2019},
address = {New York, NY, USA},
note = {prange2019mum},
publisher = {ACM},
series = {MUM'19},
abstract = {While we put great effort in protecting digital devices and data, there is a lack of research on usable techniques to secure personal items that we carry in public space. To better understand situations where ubiquitous technologies could help secure personal items, we conducted an online survey (N=101) in which we collected real-world stories from users reporting on personal items, either at risk of, or actually being lost, damaged or stolen. We found that the majority of cases occurred in (semi-)public spaces during afternoon and evening times, when users left their items. From these results, we derived a model of incidents involving personal items in public space as well as a set of properties to describe situations where personal items may be at risk. We discuss reoccurring properties of the scenarios, potential multimedia-based protection mechanisms for securing personal items in public space as well as future research suggestions.},
doi = {10.1145/3365610.3365628},
location = {Pisa, Italy},
timestamp = {2019.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/prange2019mum.pdf},
}
H. Drewes, M. Khamis, and F. Alt. DialPlates: Enabling Pursuits-based User Interfaces with Large Target Numbers. In Proceedings of the 18th International Conference on Mobile and Ubiquitous Multimedia (MUM’19), ACM, New York, NY, USA, 2019. doi:10.1145/3365610.3365626
[BibTeX] [Abstract] [PDF]
In this paper we introduce a novel approach for smooth pursuits eye movement detection and demonstrate that it allows up to 160 targets to be distinguished. With this work we advance the well-established smooth pursuits technique, which allows gaze interaction without calibration. The approach is valuable for researchers and practitioners, since it enables novel user interfaces and applications to be created that employ a large number of targets, for example, a pursuits-based keyboard or a smart home where many different objects can be controlled using gaze. We present findings from two studies. In particular, we compare our novel detection algorithm based on linear regression with the correlation method. We quantify its accuracy for around 20 targets on a single circle and up to 160 targets on multiple circles. Finally, we implemented a pursuits-based keyboard app with 108 targets as proof-of-concept.
@InProceedings{drewes2019mum,
author = {Drewes, Heiko and Khamis, Mohamed and Alt, Florian},
booktitle = {{Proceedings of the 18th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{DialPlates: Enabling Pursuits-based User Interfaces with Large Target Numbers}},
year = {2019},
address = {New York, NY, USA},
note = {drewes2019mum},
publisher = {ACM},
series = {MUM'19},
abstract = {In this paper we introduce a novel approach for smooth pursuits eye movement detection and demonstrate that it allows up to 160 targets to be distinguished. With this work we advance the well-established smooth pursuits technique, which allows gaze interaction without calibration. The approach is valuable for researchers and practitioners, since it enables novel user interfaces and applications to be created that employ a large number of targets, for example, a pursuits-based keyboard or a smart home where many different objects can be controlled using gaze. We present findings from two studies. In particular, we compare our novel detection algorithm based on linear regression with the correlation method. We quantify its accuracy for around 20 targets on a single circle and up to 160 targets on multiple circles. Finally, we implemented a pursuits-based keyboard app with 108 targets as proof-of-concept.},
doi = {10.1145/3365610.3365626},
location = {Pisa, Italy},
timestamp = {2019.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/drewes2019mum.pdf},
}
K. Holländer, A. Colley, C. Mai, J. Häkkilä, F. Alt, and B. Pfleging. Investigating the influence of external car displays on pedestrians’ crossing behavior in virtual reality. In Proceedings of the 21st international conference on human-computer interaction with mobile devices and services (MobileHCI ’19), Association for Computing Machinery, New York, NY, USA, 2019. doi:10.1145/3338286.3340138
[BibTeX] [Abstract] [PDF]
Focusing on pedestrian safety in the era of automated vehicles, we investigate the interaction between pedestrians and automated cars. In particular, we investigate the influence of external car displays (ECDs) on pedestrians’ crossing behavior, and the time needed to make a crossing decision. We present a study in a high-immersion VR environment comparing three alternative car-situated visualizations: a smiling grille, a traffic light style indicator, and a gesturing robotic driver. Crossing at non-designated crossing points on a straight road and at a junction, where vehicles turn towards the pedestrian, are explored. We report that ECDs significantly reduce pedestrians’ decision time, and argue that ECDs support comfort, trust and acceptance in automated vehicles. We believe ECDs might become a valuable addition for future vehicles.
@inproceedings{hollaender2019mobilehci,
  author    = {Holl\"{a}nder, Kai and Colley, Ashley and Mai, Christian and H\"{a}kkil\"{a}, Jonna and Alt, Florian and Pfleging, Bastian},
  title     = {Investigating the Influence of External Car Displays on Pedestrians' Crossing Behavior in Virtual Reality},
  booktitle = {Proceedings of the 21st International Conference on Human-Computer Interaction with Mobile Devices and Services},
  series    = {MobileHCI '19},
  year      = {2019},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  location  = {Taipei, Taiwan},
  articleno = {27},
  numpages  = {11},
  isbn      = {9781450368254},
  doi       = {10.1145/3338286.3340138},
  keywords  = {Traffic safety, Virtual reality, External car displays, Pedestrian-autonomous vehicle interaction, Autonomous vehicles},
  abstract  = {Focusing on pedestrian safety in the era of automated vehicles, we investigate the interaction between pedestrians and automated cars. In particular, we investigate the influence of external car displays (ECDs) on pedestrians' crossing behavior, and the time needed to make a crossing decision. We present a study in a high-immersion VR environment comparing three alternative car-situated visualizations: a smiling grille, a traffic light style indicator, and a gesturing robotic driver. Crossing at non-designated crossing points on a straight road and at a junction, where vehicles turn towards the pedestrian, are explored. We report that ECDs significantly reduce pedestrians' decision time, and argue that ECDs support comfort, trust and acceptance in automated vehicles. We believe ECDs might become a valuable addition for future vehicles.},
  note      = {hollaender2019mobilehci},
  timestamp = {2019.10.01},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/hollaender2019mobilehci.pdf},
}
Proceedings of Mensch und Computer 2019. New York, NY, USA: ACM, 2019.
[BibTeX] [PDF]
@proceedings{alt2019muc,
  editor    = {Alt, Florian and Bulling, Andreas and D\"{o}ring, Tanja},
  title     = {{Proceedings of Mensch und Computer 2019}},
  series    = {MuC'19},
  year      = {2019},
  publisher = {ACM},
  address   = {New York, NY, USA},
  location  = {Hamburg, Germany},
  isbn      = {978-1-4503-7198-8},
  note      = {alt2019muc},
  timestamp = {2019.09.06},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/alt2019muc.pdf},
}
M. Hassib, M. Braun, B. Pfleging, and F. Alt. Detecting and influencing driver emotions using psycho-physiological sensors and ambient light. In Proceedings of the 17th IFIP TC 13 International Conference on Human-Computer Interaction (INTERACT ’19), Springer, Berlin-Heidelberg, Germany, 2019.
[BibTeX] [Abstract] [PDF]
Driving is a sensitive task that is strongly affected by the driver’s emotions. Negative emotions, such as anger, can evidently lead to more driving errors. In this work, we introduce a concept of detecting and influencing driver emotions using psycho-physiological sensing for emotion classification and ambient light for feedback. We detect arousal and valence of emotional responses from wearable bio-electric sensors, namely brain-computer interfaces and heart rate sensors. We evaluated our concept in a static driving simulator with a fully equipped car with 12 participants. Before the rides, we elicit negative emotions and evaluate driving performance and physiological data while driving under stressful conditions. We use three ambient lighting conditions (no light, blue, orange). Using a subject-dependent random forests classifier with 40 features collected from physiological data we achieve an average accuracy of 78.9% for classifying valence and 68.7% for arousal. Driving performance was enhanced in conditions where ambient lighting was introduced. Both blue and orange light helped drivers to improve lane keeping. We discuss insights from our study and provide design recommendations for designing emotion sensing and feedback systems in the car.
@InProceedings{hassib2019interact,
author = {Mariam Hassib and Michael Braun and Bastian Pfleging and Florian Alt},
booktitle = {{Proceedings of the 17th IFIP TC 13 International Conference on Human-Computer Interaction}},
title = {{Detecting and influencing driver emotions using psycho-physiological sensors and ambient light}},
year = {2019},
month = {4},
note = {hassib2019interact},
publisher = {Springer},
series = {INTERACT '19},
abstract = {Driving is a sensitive task that is strongly affected by the driver's emotions. Negative emotions, such as anger, can evidently lead to more driving errors. In this work, we introduce a concept of detecting and influencing driver emotions using psycho-physiological sensing for emotion classification and ambient light for feedback. We detect arousal and valence of emotional responses from wearable bio-electric sensors, namely brain-computer interfaces and heart rate sensors. We evaluated our concept in a static driving simulator with a fully equipped car with 12 participants. Before the rides, we elicit negative emotions and evaluate driving performance and physiological data while driving under stressful conditions. We use three ambient lighting conditions (no light, blue, orange). Using a subject-dependent random forests classifier with 40 features collected from physiological data we achieve an average accuracy of 78.9\% for classifying valence and 68.7\% for arousal. Driving performance was enhanced in conditions where ambient lighting was introduced. Both blue and orange light helped drivers to improve lane keeping. We discuss insights from our study and provide design recommendations for designing emotion sensing and feedback systems in the car.},
day = {1},
keywords = {Affective Computing, Automotive UI, EEG, Ambient Light},
language = {English},
location = {Paphos, Cyprus},
owner = {florian},
timestamp = {2019.09.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2019interact2.pdf},
internal-note = {NOTE(review): abstract repaired (lost fi-ligatures, value broken across lines); url points to braun2019interact2.pdf, not hassib2019interact.pdf -- verify; month = {4} with day = {1} looks off for INTERACT'19 (held in September) -- verify},
}
M. Braun, R. Chadowitz, and F. A. Alt. User Experience of Driver State Visualizations: a Look at Demographics and Personalities. In Proceedings of the 17th IFIP TC 13 International Conference on Human-Computer Interaction (INTERACT ’19), Springer, Berlin-Heidelberg, Germany, 2019.
[BibTeX] [Abstract] [PDF]
Driver state detection is an emerging topic for automotive user interfaces. Motivated by the trend of self-tracking, one crucial question within this field is how or whether detected states should be displayed. In this work we investigate the impact of demographics and personality traits on the user experience of driver state visualizations. 328 participants experienced three concepts visualizing their current state in a publicly installed driving simulator. Driver age, experience, and personality traits were shown to have impact on visualization preferences. While a continuous display was generally preferred, older respondents and drivers with little experience favored a system with less visual elements. Extroverted participants were more open towards interventions. Our findings lead us to believe that, while users are generally open to driver state detection, its visualization should be adapted to age, driving experience, and personality. This work is meant to support professionals and researchers designing affective in-car information systems.
@InProceedings{braun2019interact,
author = {Braun, Michael and Chadowitz, Ronee and Alt, Florian},
booktitle = {{Proceedings of the 17th IFIP TC 13 International Conference on Human-Computer Interaction}},
title = {{User Experience of Driver State Visualizations: a Look at Demographics and Personalities}},
year = {2019},
month = {4},
note = {braun2019interact},
publisher = {Springer},
series = {INTERACT '19},
abstract = {Driver state detection is an emerging topic for automotive user interfaces. Motivated by the trend of self-tracking, one crucial question within this field is how or whether detected states should be displayed. In this work we investigate the impact of demographics and personality traits on the user experience of driver state visualizations. 328 participants experienced three concepts visualizing their current state in a publicly installed driving simulator. Driver age, experience, and personality traits were shown to have impact on visualization preferences. While a continuous display was generally preferred, older respondents and drivers with little experience favored a system with less visual elements. Extroverted participants were more open towards interventions. Our findings lead us to believe that, while users are generally open to driver state detection, its visualization should be adapted to age, driving experience, and personality. This work is meant to support professionals and researchers designing affective in-car information systems.},
day = {1},
keywords = {Affective Computing, Emotion Detection, Demographics, Personality, Driver State Visualization, Automotive User Interfaces},
language = {English},
location = {Paphos, Cyprus},
owner = {florian},
timestamp = {2019.09.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2019interact.pdf},
}
L. Mecke, D. Buschek, M. Kiermeier, S. Prange, and F. Alt. Exploring Intentional Behaviour Modifications for Password Typing on Mobile Touchscreen Devices. In Fifteenth symposium on usable privacy and security (SOUPS 2019) (SOUPS 2019), {USENIX} Association, Santa Clara, CA, 2019, p. 303–317.
[BibTeX] [PDF] [Talk] [Slides]
@inproceedings{mecke2019soups2,
  author    = {Lukas Mecke and Daniel Buschek and Mathias Kiermeier and Sarah Prange and Florian Alt},
  title     = {{Exploring Intentional Behaviour Modifications for Password Typing on Mobile Touchscreen Devices}},
  booktitle = {Fifteenth Symposium on Usable Privacy and Security ({SOUPS} 2019)},
  series    = {SOUPS 2019},
  month     = aug,
  year      = {2019},
  pages     = {303--317},
  publisher = {{USENIX} Association},
  isbn      = {978-1-939133-05-2},
  note      = {mecke2019soups2},
  slides    = {https://www.usenix.org/sites/default/files/conference/protected-files/soups2019_slides_mecke_behaviour.pdf},
  talk      = {https://youtu.be/EzTXFUnGDI0},
  timestamp = {2019.08.13},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/mecke2019soups2.pdf},
}
L. Mecke, S. D. Rodriguez, D. Buschek, S. Prange, and F. Alt. Communicating Device Confidence Level and Upcoming Re-Authentications in Continuous Authentication Systems on Mobile Devices. In Proceedings of the Fifteenth Symposium on Usable Privacy and Security (SOUPS 2019), {USENIX} Association, Santa Clara, CA, 2019, p. 289–301.
[BibTeX] [PDF] [Talk] [Slides]
@inproceedings{mecke2019soups1,
  author    = {Lukas Mecke and Sarah Delgado Rodriguez and Daniel Buschek and Sarah Prange and Florian Alt},
  title     = {{Communicating Device Confidence Level and Upcoming Re-Authentications in Continuous Authentication Systems on Mobile Devices}},
  booktitle = {{Proceedings of the Fifteenth Symposium on Usable Privacy and Security}},
  series    = {SOUPS 2019},
  month     = aug,
  year      = {2019},
  pages     = {289--301},
  publisher = {{USENIX} Association},
  isbn      = {978-1-939133-05-2},
  note      = {mecke2019soups1},
  slides    = {https://www.usenix.org/sites/default/files/conference/protected-files/soups19_slides_mecke.pdf},
  talk      = {https://youtu.be/eFd7NSt45Oo},
  timestamp = {2019.08.13},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/mecke2019soups1.pdf},
}
S. Prange, Y. Abdrabou, L. Mecke, and F. Alt. Hidden in Plain Sight: Using Lockscreen Content for Authentication on Mobile Devices. In Proceedings of the Fifteenth Symposium on Usable Privacy and Security (SOUPS 2019), {USENIX} Association, Santa Clara, CA, 2019.
[BibTeX] [PDF]
@InProceedings{prange2019soupsadj,
author = {Sarah Prange AND Yasmeen Abdrabou AND Lukas Mecke and Florian Alt},
booktitle = {{Proceedings of the Fifteenth Symposium on Usable Privacy and Security}},
title = {{Hidden in Plain Sight: Using Lockscreen Content for Authentication on Mobile Devices}},
year = {2019},
note = {prange2019soupsadj},
publisher = {{USENIX} Association},
series = {SOUPS 2019},
timestamp = {2019.08.12},
}
S. R. R. Rivu, Y. Abdrabou, T. Mayer, K. Pfeuffer, and F. Alt. GazeButton: Enhancing Buttons with Eye Gaze Interactions. In Proceedings of the 2019 ACM Symposium on Eye Tracking Research & Applications (COGAIN ’19), Association for Computing Machinery, New York, NY, USA, 2019. doi:10.1145/3317956.3318154
[BibTeX] [Abstract] [PDF]
The button is an element of a user interface to trigger an action, traditionally using click or touch. We introduce GazeButton, a novel concept extending the default button mode with advanced gaze-based interactions. During normal interaction, users can utilise this button as a universal hub for gaze-based UI shortcuts. The advantages are: 1) easy to integrate in existing UIs, 2) complementary, as users choose either gaze or manual interaction, 3) straightforward, as all features are located in one button, and 4) one button to interact with the whole screen. We explore GazeButtons for a custom-made text reading, writing, and editing tool on a multitouch tablet device. For example, this allows the text cursor position to be set as users look at the position and tap on the GazeButton, avoiding costly physical movement. Or, users can simply gaze over a part of the text that should be selected, while holding the GazeButton. We present a design space, specific application examples, and point to future button designs that become highly expressive by unifying the user’s visual and manual input.
@InProceedings{rivu2019cogain,
author = {Sheikh Radiah Rahim Rivu and Yasmeen Abdrabou and Thomas Mayer and Ken Pfeuffer and Florian Alt},
booktitle = {{Proceedings of the 2019 ACM Symposium on Eye Tracking Research \& Applications}},
title = {{GazeButton: Enhancing Buttons with Eye Gaze Interactions}},
year = {2019},
address = {New York, NY, USA},
note = {rivu2019cogain},
publisher = {Association for Computing Machinery},
series = {COGAIN '19},
abstract = {The button is an element of a user interface to trigger an action, traditionally using click or touch. We introduce GazeButton, a novel concept extending the default button mode with advanced gaze-based interactions. During normal interaction, users can utilise this button as a universal hub for gaze-based UI shortcuts. The advantages are: 1) easy to integrate in existing UIs, 2) complementary, as users choose either gaze or manual interaction, 3) straightforward, as all features are located in one button, and 4) one button to interact with the whole screen. We explore GazeButtons for a custom-made text reading, writing, and editing tool on a multitouch tablet device. For example, this allows the text cursor position to be set as users look at the position and tap on the GazeButton, avoiding costly physical movement. Or, users can simply gaze over a part of the text that should be selected, while holding the GazeButton. We present a design space, specific application examples, and point to future button designs that become highly expressive by unifying the user's visual and manual input.},
articleno = {73},
doi = {10.1145/3317956.3318154},
isbn = {9781450367097},
keywords = {touch and gaze, text input, interaction modality},
numpages = {7},
timestamp = {2019.06.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/rivu2019cogain.pdf},
}
H. Drewes, K. Pfeuffer, and F. Alt. Time- and Space-efficient Eye Tracker Calibration. In Proceedings of the 2019 ACM Symposium on Eye Tracking Research & Applications (ETRA ’19), ACM, New York, NY, USA, 2019. doi:10.1145/3314111.3319818
[BibTeX] [Abstract] [PDF]
One of the obstacles to bring eye tracking technology to everyday human computer interactions is the time consuming calibration procedure. In this paper we investigate a novel calibration method based on smooth pursuit eye movement. The method uses linear regression to calculate the calibration mapping. The advantage is that users can perform the calibration quickly in a few seconds and only use a small calibration area to cover a large tracking area. We first describe the theoretical background on establishing a calibration mapping and discuss differences of calibration methods used. We then present a user study comparing the new regression based method with a classical nine-point and with other pursuit based calibrations. The results show the proposed method is fully functional, quick, and enables accurate tracking of a large area. The method has the potential to be integrated into current eye tracking systems to make them more usable in various use cases.
@inproceedings{drewes2019etra,
title = {{Time- and Space-efficient Eye Tracker Calibration}},
author = {Drewes, Heiko and Pfeuffer, Ken and Alt, Florian},
booktitle = {{Proceedings of the 2019 ACM Symposium on Eye Tracking Research \& Applications}},
series = {ETRA '19},
year = {2019},
publisher = {ACM},
address = {New York, NY, USA},
location = {Denver, CO, USA},
doi = {10.1145/3314111.3319818},
isbn = {978-1-4503-6709-7},
acmid = {3319818},
numpages = {8},
keywords = {eye-tracking, calibration, eye-tracker, smooth pursuit, eye movement},
abstract = {One of the obstacles to bring eye tracking technology to everyday human computer interactions is the time consuming calibration procedure. In this paper we investigate a novel calibration method based on smooth pursuit eye movement. The method uses linear regression to calculate the calibration mapping. The advantage is that users can perform the calibration quickly in a few seconds and only use a small calibration area to cover a large tracking area. We first describe the theoretical background on establishing a calibration mapping and discuss differences of calibration methods used. We then present a user study comparing the new regression based method with a classical nine-point and with other pursuit based calibrations. The results show the proposed method is fully functional, quick, and enables accurate tracking of a large area. The method has the potential to be integrated into current eye tracking systems to make them more usable in various use cases.},
note = {drewes2019etra},
owner = {florian},
timestamp = {2019.06.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/drewes2019etra.pdf},
}
C. George, P. Janssen, D. Heuss, and F. Alt. Should I interrupt or not? understanding interruptions in head-mounted display settings. In Proceedings of the 2019 on designing interactive systems conference (DIS ’19), Association for Computing Machinery, New York, NY, USA, 2019, p. 497–510. doi:10.1145/3322276.3322363
[BibTeX] [Abstract] [PDF]
Head-mounted displays (HMDs) are being used for VR and AR applications and increasingly permeate our everyday life. At the same time, a detailed understanding of interruptions in settings where people wearing an HMD (HMD user) and people not wearing an HMD (bystander) is missing. We investigate (a) whether bystanders are capable of identifying when HMD users switch tasks by observing their gestures, and hence exploit opportune moments for interruptions, and (b) which strategies bystanders employ. In a lab study (N=64) we found that bystanders are able to successfully identify both task switches (83%) and tasks (77%) within only a few seconds of the task switch. Furthermore, we identified interruption strategies of bystanders. From our results we derive implications meant to support designers and practitioners in building HMD applications that are used in a co-located collaborative setting.
@InProceedings{george2019dis,
author = {George, Ceenu and Janssen, Philipp and Heuss, David and Alt, Florian},
booktitle = {Proceedings of the 2019 on Designing Interactive Systems Conference},
title = {Should I Interrupt or Not? Understanding Interruptions in Head-Mounted Display Settings},
year = {2019},
address = {New York, NY, USA},
note = {george2019dis},
pages = {497--510},
publisher = {Association for Computing Machinery},
series = {DIS '19},
abstract = {Head-mounted displays (HMDs) are being used for VR and AR applications and increasingly permeate our everyday life. At the same time, a detailed understanding of interruptions in settings where people wearing an HMD (HMD user) and people not wearing an HMD (bystander) is missing. We investigate (a) whether bystanders are capable of identifying when HMD users switch tasks by observing their gestures, and hence exploit opportune moments for interruptions, and (b) which strategies bystanders employ. In a lab study (N=64) we found that bystanders are able to successfully identify both task switches (83\%) and tasks (77\%) within only a few seconds of the task switch. Furthermore, we identified interruption strategies of bystanders. From our results we derive implications meant to support designers and practitioners in building HMD applications that are used in a co-located collaborative setting.},
doi = {10.1145/3322276.3322363},
isbn = {9781450358507},
keywords = {virtual and augmented reality, gesture, hmd, interruption},
location = {San Diego, CA, USA},
numpages = {14},
owner = {florian},
timestamp = {2019.06.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/george2019dis.pdf},
}
M. Kattenbeck, M. A. Kilian, M. Ferstl, B. Ludwig, and F. Alt. Towards task-sensitive assistance in public spaces. Aslib Journal of Information Management, 2019.
[BibTeX] [Abstract] [PDF]
Purpose Performing tasks in public spaces can be demanding due to task complexity. Systems that can keep track of the current task state may help their users to successfully fulfill a task. These systems, however, require major implementation effort. The purpose of this paper is to investigate if and how a mobile information assistant which has only basic task-tracking capabilities can support users by employing a least effort approach. This means, we are interested in whether such a system is able to have an impact on the way a workflow in public space is perceived. Design/methodology/approach The authors implement and test AIRBOT, a mobile chatbot application that can assist air passengers in successfully boarding a plane. The authors apply a three-tier approach and, first, conduct expert and passenger interviews to understand the workflow and the information needs occurring therein; second, the authors implement a mobile chatbot application providing minimum task-tracking capabilities to support travelers by providing boarding-relevant information in a proactive manner. Finally, the authors evaluate this application by means of an in situ study (n = 101 passengers) at a major European airport. Findings The authors provide evidence that basic task-tracking capabilities are sufficient to affect the users' task perception. AIRBOT is able to decrease the perceived workload airport services impose on users. It has a negative impact on satisfaction with non-personalized information offered by the airport, though. Originality/value The study shows that the number of features is not the most important means to successfully provide assistance in public space workflows. The study can, moreover, serve as a blueprint to design task-based assistants for other contexts.
@Article{kattenbeck2019ajim,
author = {Markus Kattenbeck and Melanie A. Kilian and Matthias Ferstl and Bernd Ludwig and Florian Alt},
journal = {{Aslib Journal of Information Management}},
title = {{Towards task-sensitive assistance in public spaces}},
year = {2019},
note = {kattenbeck2019ajim},
abstract = {Purpose
Performing tasks in public spaces can be demanding due to task complexity. Systems that can keep track of the current task state may help their users to successfully fulfill a task. These systems, however, require major implementation effort. The purpose of this paper is to investigate if and how a mobile information assistant which has only basic task-tracking capabilities can support users by employing a least effort approach. This means, we are interested in whether such a system is able to have an impact on the way a workflow in public space is perceived.
Design/methodology/approach
The authors implement and test AIRBOT, a mobile chatbot application that can assist air passengers in successfully boarding a plane. The authors apply a three-tier approach and, first, conduct expert and passenger interviews to understand the workflow and the information needs occurring therein; second, the authors implement a mobile chatbot application providing minimum task-tracking capabilities to support travelers by providing boarding-relevant information in a proactive manner. Finally, the authors evaluate this application by means of an in situ study (n = 101 passengers) at a major European airport.
Findings
The authors provide evidence that basic task-tracking capabilities are sufficient to affect the users' task perception. AIRBOT is able to decrease the perceived workload airport services impose on users. It has a negative impact on satisfaction with non-personalized information offered by the airport, though.
Originality/value
The study shows that the number of features is not the most important means to successfully provide assistance in public space workflows. The study can, moreover, serve as a blueprint to design task-based assistants for other contexts.},
keywords = {Human-computer interaction, Assistance system, Cooperative problem solving, In situ study, Mobile information behaviour, Mobile information needs},
publisher = {Emerald Publishing Limited},
timestamp = {2019.06.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/kattenbeck2019ajim.pdf},
}
K. Pfeuffer, M. Geiger, S. Prange, L. Mecke, D. Buschek, and F. Alt. Behavioural Biometrics in VR – Identifying People from Body Motion and Relations in Virtual Reality. In Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems (CHI ’19), ACM, New York, NY, USA, 2019.
[BibTeX] [Abstract] [PDF]
This paper investigates personalized voice characters for incarspeech interfaces. In particular, we report on how wedesigned different personalities for voice assistants and comparedthem in a real world driving study. Voice assistantshave become important for a wide range of use cases, yet current interfaces are using the same style of auditory responsein every situation, despite varying user needs andpersonalities. To close this gap, we designed four assistantpersonalities (Friend, Admirer, Aunt, and Butler) and comparedthem to a baseline (Default) in a between-subject studyin real traffic conditions. Our results show higher likabilityand trust for assistants that correctly match the user’s personalitywhile we observed lower likability, trust, satisfaction,and usefulness for incorrectly matched personalities, eachin comparison with the Default character. We discuss designaspects for voice assistants in different automotive use cases.
@InProceedings{pfeuffer2019chi,
author = {Ken Pfeuffer and Matthias Geiger and Sarah Prange and Lukas Mecke and Daniel Buschek and Florian Alt},
booktitle = {{Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems}},
title = {{Behavioural Biometrics in VR -- Identifying People from Body Motion and Relations in Virtual Reality}},
year = {2019},
address = {New York, NY, USA},
note = {pfeuffer2019chi},
publisher = {ACM},
series = {CHI '19},
internal-note = {abstract removed during review: the previous abstract text duplicated braun2019chi (in-car voice assistant personalities) and did not describe this VR behavioural-biometrics paper -- restore the correct abstract from the publisher},
keywords = {Virtual Reality, Behavioural Biometrics, Motion, Relation, Proprioception, Adaptive UIs},
location = {Glasgow, UK},
numpages = {11},
timestamp = {2019.05.03},
url = {http://florian-alt.org/unibw/wp-content/publications/pfeuffer2019chi.pdf},
}
M. Braun and F. Alt. Affective Assistants: A Matter of States and Traits. In Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems (CHI EA’19), ACM, New York, NY, USA, 2019.
[BibTeX] [Abstract] [PDF]
This work presents a model for the development of affective assistants based on the pillars of user states and traits. Traits are defined as long-term qualities like personality, personal experiences, preferences, and demographics, while the user state comprises cognitive load, emotional states, and physiological parameters. We discuss useful input values and the necessary developments for an advancement of affective assistants with the example of an affective in-car voice assistant.
@InProceedings{braun2019chiea,
author = {Michael Braun and Florian Alt},
title = {{Affective Assistants: A Matter of States and Traits}},
booktitle = {{Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems}},
year = {2019},
series = {CHI EA'19},
address = {New York, NY, USA},
publisher = {ACM},
note = {braun2019chiea},
abstract = {This work presents a model for the development of affective assistants based on the pillars of user states and traits. Traits are defined as long-term qualities like personality, personal experiences, preferences, and demographics, while the user state comprises cognitive load, emotional states, and physiological parameters. We discuss useful input values and the necessary developments for an advancement of affective assistants with the example of an affective in-car voice assistant.},
comment = {braun2019chiea},
location = {Glasgow, UK},
timestamp = {2019.05.03},
url = {http://florian-alt.org/unibw/wp-content/publications/braun2019chiea.pdf},
}
M. Braun, A. Mainz, R. Chadowitz, B. Pfleging, and F. Alt. At Your Service: Designing Voice Assistant Personalities to Improve Automotive User Interfaces. In Proceedings of the 2019 CHI conference on human factors in computing systems (CHI ’19), Association for Computing Machinery, New York, NY, USA, 2019, p. 1–11. doi:10.1145/3290605.3300270
[BibTeX] [Abstract] [PDF] [Talk]
This paper investigates personalized voice characters for in-car speech interfaces. In particular, we report on how we designed different personalities for voice assistants and compared them in a real world driving study. Voice assistants have become important for a wide range of use cases, yet current interfaces are using the same style of auditory response in every situation, despite varying user needs and personalities. To close this gap, we designed four assistant personalities (Friend, Admirer, Aunt, and Butler) and compared them to a baseline (Default) in a between-subject study in real traffic conditions. Our results show higher likability and trust for assistants that correctly match the user’s personality while we observed lower likability, trust, satisfaction, and usefulness for incorrectly matched personalities, each in comparison with the Default character. We discuss design aspects for voice assistants in different automotive use cases.
@InProceedings{braun2019chi,
author = {Michael Braun and Anja Mainz and Ronee Chadowitz and Bastian Pfleging and Florian Alt},
booktitle = {Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems},
title = {{At Your Service: Designing Voice Assistant Personalities to Improve Automotive User Interfaces}},
year = {2019},
address = {New York, NY, USA},
note = {braun2019chi},
pages = {1--11},
publisher = {Association for Computing Machinery},
series = {CHI '19},
abstract = {This paper investigates personalized voice characters for in-car speech interfaces. In particular, we report on how we designed different personalities for voice assistants and compared them in a real world driving study. Voice assistants have become important for a wide range of use cases, yet current interfaces are using the same style of auditory response in every situation, despite varying user needs and personalities. To close this gap, we designed four assistant personalities (Friend, Admirer, Aunt, and Butler) and compared them to a baseline (Default) in a between-subject study in real traffic conditions. Our results show higher likability and trust for assistants that correctly match the user's personality while we observed lower likability, trust, satisfaction, and usefulness for incorrectly matched personalities, each in comparison with the Default character. We discuss design aspects for voice assistants in different automotive use cases.},
comment = {braun2019chi},
doi = {10.1145/3290605.3300270},
isbn = {9781450359702},
numpages = {11},
timestamp = {2019.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2019chi.pdf},
}
S. Faltaous, G. Haas, L. Barrios, A. Seiderer, S. F. Rauh, H. J. Chae, S. Schneegass, and F. Alt. BrainShare: A Glimpse of Social Interaction for Locked-in Syndrome Patients. In Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems (CHI EA ’19), Association for Computing Machinery, New York, NY, USA, 2019, p. 1–6. doi:10.1145/3290607.3312754
[BibTeX] [Abstract] [PDF]
Locked-in syndrome (LIS) patients are partially or entirely paralyzed but fully conscious. Those patients report a high quality of life and desire to remain active in their society and families. We propose a system for enhancing social interactions of LIS patients with their families and friends with the goal of improving their overall quality of life. Our system comprises a Brain-Computer Interface (BCI), augmented-reality glasses, and a screen that shares the view of a caretaker with the patient. This setting targets both patients and caretakers: (1) it allows the patient to experience the outside world through the eyes of the caretaker and (2) it creates a way of active communication between patient and caretaker to convey needs and advice. To validate our approach, we showcased our prototype and conducted interviews that demonstrate the potential benefit for affected patients.
@InProceedings{faltaous2019chiea,
author = {Sarah Faltaous and Gabriel Haas and Liliana Barrios and Andreas Seiderer and Sebastian Felix Rauh and Han Joo Chae and Stefan Schneegass and Florian Alt},
booktitle = {{Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems}},
title = {{BrainShare: A Glimpse of Social Interaction for Locked-in Syndrome Patients}},
year = {2019},
address = {New York, NY, USA},
note = {faltaous2019chiea},
pages = {1--6},
publisher = {Association for Computing Machinery},
series = {CHI EA '19},
abstract = {Locked-in syndrome (LIS) patients are partially or entirely paralyzed but fully conscious. Those patients report a high quality of life and desire to remain active in their society and families. We propose a system for enhancing social interactions of LIS patients with their families and friends with the goal of improving their overall quality of life. Our system comprises a Brain-Computer Interface (BCI), augmented-reality glasses, and a screen that shares the view of a caretaker with the patient. This setting targets both patients and caretakers: (1) it allows the patient to experience the outside world through the eyes of the caretaker and (2) it creates a way of active communication between patient and caretaker to convey needs and advice. To validate our approach, we showcased our prototype and conducted interviews that demonstrate the potential benefit for affected patients.},
comment = {faltaous2019chiea},
doi = {10.1145/3290607.3312754},
isbn = {9781450359719},
keywords = {brain-computer interaction, augmented reality, locked-in syndrome, social interaction},
location = {Glasgow, Scotland Uk},
numpages = {6},
timestamp = {2019.05.01},
url = {http://florian-alt.org/unibw/wp-content/publications/faltaous2019chiea.pdf},
}
S. Prange, D. Buschek, K. Pfeuffer, L. Mecke, P. Ehrich, J. Le, and F. Alt. Go for GOLD: Investigating User Behaviour in Goal-Oriented Tasks. In Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems (CHI EA’19), ACM, New York, NY, USA, 2019.
[BibTeX] [Abstract] [PDF]
Building adaptive support systems requires a deep understanding of why users get stuck or face problems during a goal-oriented task and how they perceive such situations. To investigate this, we first chart a problem space, comprising different problem characteristics (complexity, time, available means, and consequences). Secondly, we map them to LEGO assembly tasks. We apply these in a lab study equipped with several tracking technologies (i.e., smartwatch sensors and an OptiTrack setup) to assess which problem characteristics lead to measurable consequences in user behaviour. Participants rated occurred problems after each task. With this work, we suggest first steps towards a) understanding user behaviour in problem situation and b) building upon this knowledge to inform the design of adaptive support systems. As a result, we provide the GOLD dataset (Goal-Oriented Lego Dataset) for further analysis.
@InProceedings{prange2019chiea,
author = {Sarah Prange and Daniel Buschek and Ken Pfeuffer and Lukas Mecke and Peter Ehrich and Jens Le and Florian Alt},
title = {{Go for GOLD: Investigating User Behaviour in Goal-Oriented Tasks}},
booktitle = {{Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems}},
year = {2019},
series = {CHI EA'19},
address = {New York, NY, USA},
publisher = {ACM},
note = {prange2019chiea},
abstract = {Building adaptive support systems requires a deep understanding of why users get stuck or face problems during a goal-oriented task and how they perceive such situations. To investigate this, we first chart a problem space, comprising different problem characteristics (complexity, time, available means, and consequences). Secondly, we map them to LEGO assembly tasks. We apply these in a lab study equipped with several tracking technologies (i.e., smartwatch sensors and an OptiTrack setup) to assess which problem characteristics lead to measurable consequences in user behaviour. Participants rated occurred problems after each task. With this work, we suggest first steps towards a) understanding user behaviour in problem situation and b) building upon this knowledge to inform the design of adaptive support systems. As a result, we provide the GOLD dataset (Goal-Oriented Lego Dataset) for further analysis.},
comment = {prange2019chiea},
location = {Glasgow, UK},
timestamp = {2019.05.01},
url = {http://florian-alt.org/unibw/wp-content/publications/prange2019chiea.pdf},
}
S. Prange, C. Tiefenau, E. von Zezschwitz, and F. Alt. Towards Understanding User Interaction in Future Smart Homes. In Proceedings of CHI ’19 Workshop on New Directions for the IoT: Automate, Share, Build, and Care (CHI ’19 Workshop), ACM, New York, NY, USA, 2019.
[BibTeX] [Abstract] [PDF]
IoT devices are currently finding their way into peoples homes, providing rich functionality by means of various interaction modalities. We see great potential in collecting and analysing data about users’ interaction with their smart home devices to gain insights about their daily life behaviour for self-reflection as well as security purposes. We present a methodology to study interaction with IoT devices in users’ (smart) homes. Logging daily behaviour usually comes with high effort and often interrupts natural interaction. Hence, we suggest an unobtrusive logging approach by means of a smartwatch and NFC technology. Participants scan interaction with devices using self-placed NFC tags. We tested our method with two flat shares in two cities and provide preliminary insights with regards to the strengths and weaknesses of our study approach.
@InProceedings{prange2019iot,
author = {Sarah Prange and Christian Tiefenau and Emanuel von Zezschwitz and Florian Alt},
booktitle = {{Proceedings of CHI '19 Workshop on New Directions for the IoT: Automate, Share, Build, and Care}},
title = {{Towards Understanding User Interaction in Future Smart Homes}},
year = {2019},
address = {New York, NY, USA},
note = {prange2019iot},
publisher = {ACM},
series = {CHI '19 Workshop},
abstract = {IoT devices are currently finding their way into peoples homes, providing rich functionality by means of various interaction modalities. We see great potential in collecting and analysing data about users' interaction with their smart home devices to gain insights about their daily life behaviour for self-reflection as well as security purposes. We present a methodology to study interaction with IoT devices in users' (smart) homes. Logging daily behaviour usually comes with high effort and often interrupts natural interaction. Hence, we suggest an unobtrusive logging approach by means of a smartwatch and NFC technology. Participants scan interaction with devices using self-placed NFC tags. We tested our method with two flat shares in two cities and provide preliminary insights with regards to the strengths and weaknesses of our study approach.},
keywords = {IoT, Internet of Things, Smart Home, Smart Devices, NFC, Android, Field Study, Data Collection, In-the-wild},
location = {Glasgow, UK},
numpages = {5},
owner = {florian},
timestamp = {2019.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/prange2019iot.pdf},
}
M. Braun, N. Broy, B. Pfleging, and F. Alt. Visualizing natural language interaction for conversational in-vehicle information systems to minimize driver distraction. Journal on Multimodal User Interfaces, vol. 13, iss. 2, p. 71–88, 2019. doi:10.1007/s12193-019-00301-2
[BibTeX] [Abstract] [PDF]
In this paper we investigate how natural language interfaces can be integrated with cars in a way such that their influence on driving performance is being minimized. In particular, we focus on how speech-based interaction can be supported through a visualization of the conversation. Our work is motivated by the fact that speech interfaces (like Alexa, Siri, Cortana, etc.) are increasingly finding their way into our everyday life. We expect such interfaces to become commonplace in vehicles in the future. Cars are a challenging environment, since speech interaction here is a secondary task that should not negatively affect the primary task, that is driving. At the outset of our work, we identify the design space for such interfaces. We then compare different visualization concepts in a driving simulator study with 64 participants. Our results yield that (1) text summaries support drivers in recalling information and enhances user experience but can also increase distraction, (2) the use of keywords minimizes cognitive load and influence on driving performance, and (3) the use of icons increases the attractiveness of the interface.
@article{braun2019JMUI,
title = {{Visualizing natural language interaction for conversational in-vehicle information systems to minimize driver distraction}},
author = {Braun, Michael and Broy, Nora and Pfleging, Bastian and Alt, Florian},
journal = {{Journal on Multimodal User Interfaces}},
year = {2019},
month = jun,
day = {01},
volume = {13},
number = {2},
pages = {71--88},
issn = {1783-8738},
doi = {10.1007/s12193-019-00301-2},
abstract = {In this paper we investigate how natural language interfaces can be integrated with cars in a way such that their influence on driving performance is being minimized. In particular, we focus on how speech-based interaction can be supported through a visualization of the conversation. Our work is motivated by the fact that speech interfaces (like Alexa, Siri, Cortana, etc.) are increasingly finding their way into our everyday life. We expect such interfaces to become commonplace in vehicles in the future. Cars are a challenging environment, since speech interaction here is a secondary task that should not negatively affect the primary task, that is driving. At the outset of our work, we identify the design space for such interfaces. We then compare different visualization concepts in a driving simulator study with 64 participants. Our results yield that (1) text summaries support drivers in recalling information and enhances user experience but can also increase distraction, (2) the use of keywords minimizes cognitive load and influence on driving performance, and (3) the use of icons increases the attractiveness of the interface.},
note = {braun2019JMUI},
owner = {florian},
timestamp = {2019.04.16},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2019JMUI.pdf},
}
M. Braun, J. Schubert, B. Pfleging, and F. Alt. Improving Driver Emotions with Affective Strategies. Multimodal Technologies and Interaction, vol. 3, iss. 1, 2019. doi:10.3390/mti3010021
[BibTeX] [Abstract] [PDF]
Drivers in negative emotional states, such as anger or sadness, are prone to perform bad at driving, decreasing overall road safety for all road users. Recent advances in affective computing, however, allow for the detection of such states and give us tools to tackle the connected problems within automotive user interfaces. We see potential in building a system which reacts upon possibly dangerous driver states and influences the driver in order to drive more safely. We compare different interaction approaches for an affective automotive interface, namely Ambient Light, Visual Notification, a Voice Assistant, and an Empathic Assistant. Results of a simulator study with 60 participants (30 each with induced sadness/anger) indicate that an emotional voice assistant with the ability to empathize with the user is the most promising approach as it improves negative states best and is rated most positively. Qualitative data also shows that users prefer an empathic assistant but also resent potential paternalism. This leads us to suggest that digital assistants are a valuable platform to improve driver emotions in automotive environments and thereby enable safer driving.
@article{braun2019mti,
author = {Braun, Michael and Schubert, Jonas and Pfleging, Bastian and Alt, Florian},
title = {{Improving Driver Emotions with Affective Strategies}},
journal = {{Multimodal Technologies and Interaction}},
year = {2019},
volume = {3},
number = {1},
article-number = {21},
issn = {2414-4088},
doi = {10.3390/mti3010021},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2019mti.pdf},
abstract = {Drivers in negative emotional states, such as anger or sadness, are prone to perform bad at driving, decreasing overall road safety for all road users. Recent advances in affective computing, however, allow for the detection of such states and give us tools to tackle the connected problems within automotive user interfaces. We see potential in building a system which reacts upon possibly dangerous driver states and influences the driver in order to drive more safely. We compare different interaction approaches for an affective automotive interface, namely Ambient Light, Visual Notification, a Voice Assistant, and an Empathic Assistant. Results of a simulator study with 60 participants (30 each with induced sadness/anger) indicate that an emotional voice assistant with the ability to empathize with the user is the most promising approach as it improves negative states best and is rated most positively. Qualitative data also shows that users prefer an empathic assistant but also resent potential paternalism. This leads us to suggest that digital assistants are a valuable platform to improve driver emotions in automotive environments and thereby enable safer driving.},
note = {braun2019mti},
owner = {florian},
timestamp = {2019.04.16},
}
R. Häuslschmid, D. Ren, F. Alt, A. Butz, and T. Höllerer. Personalizing Content Presentation on Large 3D Head-Up Displays. PRESENCE: Virtual and Augmented Reality, vol. 27, iss. 1, pp. 80-106, 2019. doi:10.1162/pres_a_00315
[BibTeX] [Abstract] [PDF]
Drivers’ urge to access content on smartphones while driving causes a high number of fatal accidents every year. We explore 3D full-windshield size head-up displays as an opportunity to present such content in a safer manner. In particular, we look into how drivers would personalize such displays and whether it can be considered safe. Firstly, by means of an online survey we identify types of content users access on their smartphones while driving and whether users are interested in the same content on a head-up display. Secondly, we let drivers design personalized 3D layouts and assess how personalization impacts on driving safety. Thirdly, we compare personalized layouts to a one-fits-all layout concept in a 3D driving simulator study regarding safety. We found that drivers’ content preferences diverge largely and that most of the personalized layouts do not respect safety sufficiently. The one-fits-all layout led to a better response performance but needs to be modified to consider the drivers’ preferences. We discuss the implications of the presented research on road safety and future 3D information placement on head-up displays.
@Article{haeuslschmid2019mti,
author = {H{\"a}uslschmid, Renate and Ren, Donhao and Alt, Florian and Butz, Andreas and H{\"o}llerer, Tobias},
journal = {{PRESENCE: Virtual and Augmented Reality}},
title = {{Personalizing Content Presentation on Large 3D Head-Up Displays}},
year = {2019},
note = {haeuslschmid2019mti},
number = {1},
pages = {80--106},
volume = {27},
abstract = {Drivers' urge to access content on smartphones while driving causes a high number of fatal accidents every year. We explore 3D full-windshield size head-up displays as an opportunity to present such content in a safer manner. In particular, we look into how drivers would personalize such displays and whether it can be considered safe. Firstly, by means of an online survey we identify types of content users access on their smartphones while driving and whether users are interested in the same content on a head-up display. Secondly, we let drivers design personalized 3D layouts and assess how personalization impacts on driving safety. Thirdly, we compare personalized layouts to a one-fits-all layout concept in a 3D driving simulator study regarding safety. We found that drivers' content preferences diverge largely and that most of the personalized layouts do not respect safety sufficiently. The one-fits-all layout led to a better response performance but needs to be modified to consider the drivers' preferences. We discuss the implications of the presented research on road safety and future 3D information placement on head-up displays.},
doi = {10.1162/pres_a_00315},
eprint = {https://www.mitpressjournals.org/doi/pdf/10.1162/pres_a_00315},
owner = {florian},
timestamp = {2019.04.16},
url = {http://www.florian-alt.org/unibw/wp-content/publications/haeuslschmid2019mti.pdf},
}

### 2018

D. Buschek, M. Hassib, and F. Alt. Personal Mobile Messaging in Context: Chat Augmentations for Expressiveness and Awareness. ACM Transactions on Computer-Human Interaction (ToCHI), vol. 25, iss. 4, p. 23:1–23:33, 2018. doi:10.1145/3201404
[BibTeX] [Abstract] [PDF]
Mobile text messaging is one of the most important communication channels today, but it suffers from lack of expressiveness, context and emotional awareness, compared to face-to-face communication. We address this problem by augmenting text messaging with information about users and contexts. We present and reflect on lessons learned from three field studies, in which we deployed augmentation concepts as prototype chat apps in users’ daily lives. We studied (1) subtly conveying context via dynamic font personalisation (TapScript), (2) integrating and sharing physiological data – namely heart rate – implicitly or explicitly (HeartChat) and (3) automatic annotation of various context cues: music, distance, weather and activities (ContextChat). Based on our studies, we discuss chat augmentation with respect to privacy concerns, understandability, connectedness and inferring context in addition to methodological lessons learned. Finally, we propose a design space for chat augmentation to guide future research, and conclude with practical design implications.
@Article{buschek2018tochi,
author = {Buschek, Daniel and Hassib, Mariam and Alt, Florian},
journal = {{ACM Transactions on Computer-Human Interaction (ToCHI)}},
title = {{Personal Mobile Messaging in Context: Chat Augmentations for Expressiveness and Awareness}},
year = {2018},
issn = {1073-0516},
month = aug,
note = {buschek2018tochi},
number = {4},
pages = {23:1--23:33},
volume = {25},
abstract = {Mobile text messaging is one of the most important communication channels today, but it suffers from lack of expressiveness, context and emotional awareness, compared to face-to-face communication. We address this problem by augmenting text messaging with information about users and contexts. We present and reflect on lessons learned from three field studies, in which we deployed augmentation concepts as prototype chat apps in users’ daily lives. We studied (1) subtly conveying context via dynamic font personalisation (TapScript), (2) integrating and sharing physiological data – namely heart rate – implicitly or explicitly (HeartChat) and (3) automatic annotation of various context cues: music, distance, weather and activities (ContextChat). Based on our studies, we discuss chat augmentation with respect to privacy concerns, understandability, connectedness and inferring context in addition to methodological lessons learned. Finally, we propose a design space for chat augmentation to guide future research, and conclude with practical design implications.},
acmid = {3201404},
address = {New York, NY, USA},
articleno = {23},
doi = {10.1145/3201404},
issue_date = {August 2018},
keywords = {Mobile text messaging, chat context, heart rate, mobile device sensors},
numpages = {33},
publisher = {ACM},
timestamp = {2019.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2018tochi.pdf},
}
M. Braun, B. Pfleging, and F. Alt. A Survey to Understand Emotional Situations on the Road and What They Mean for Affective Automotive UIs. Multimodal Technologies and Interaction, vol. 2, iss. 4, 2018. doi:10.3390/mti2040075
[BibTeX] [Abstract] [PDF]
In this paper, we present the results of an online survey (N = 170) on emotional situations on the road. In particular, we asked potential early adopters to remember a situation where they felt either an intense positive or negative emotion while driving. Our research is motivated by imminent disruptions in the automotive sector due to automated driving and the accompanying switch to selling driving experiences over horsepower. This creates a need to focus on the driver’s emotion when designing in-car interfaces. As a result of our research, we present a set of propositions for affective car interfaces based on real-life experiences. With our work we aim to support the design of affective car interfaces and give designers a foundation to build upon. We find respondents often connect positive emotions with enjoying their independence, while negative experiences are associated mostly with traffic behavior. Participants who experienced negative situations wished for better information management and a higher degree of automation. Drivers with positive emotions generally wanted to experience the situation more genuinely, for example, by switching to a “back-to-basic” mode. We explore these statements and discuss recommendations for the design of affective interfaces in future cars.
@article{braun2018mti,
author = {Braun, Michael and Pfleging, Bastian and Alt, Florian},
title = {{A Survey to Understand Emotional Situations on the Road and What They Mean for Affective Automotive UIs}},
journal = {{Multimodal Technologies and Interaction}},
year = {2018},
volume = {2},
number = {4},
article-number = {75},
issn = {2414-4088},
doi = {10.3390/mti2040075},
publisher = {MDPI},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2018mti.pdf},
abstract = {In this paper, we present the results of an online survey (N = 170) on emotional situations on the road. In particular, we asked potential early adopters to remember a situation where they felt either an intense positive or negative emotion while driving. Our research is motivated by imminent disruptions in the automotive sector due to automated driving and the accompanying switch to selling driving experiences over horsepower. This creates a need to focus on the driver’s emotion when designing in-car interfaces. As a result of our research, we present a set of propositions for affective car interfaces based on real-life experiences. With our work we aim to support the design of affective car interfaces and give designers a foundation to build upon. We find respondents often connect positive emotions with enjoying their independence, while negative experiences are associated mostly with traffic behavior. Participants who experienced negative situations wished for better information management and a higher degree of automation. Drivers with positive emotions generally wanted to experience the situation more genuinely, for example, by switching to a “back-to-basic” mode. We explore these statements and discuss recommendations for the design of affective interfaces in future cars.},
note = {braun2018mti},
timestamp = {2018.12.01},
}
M. Braun, S. T. Völkel, G. Wiegand, T. Puls, D. Steidl, Y. Weiß, and F. Alt. The Smile is The New Like: Controlling Music with Facial Expressions to Minimize Driver Distraction. In Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia (MUM 2018), ACM, New York, NY, USA, 2018, p. 383–389. doi:10.1145/3282894.3289729
[BibTeX] [Abstract] [PDF]
The control of user interfaces while driving is a textbook example for driver distraction. Modern in-car interfaces are growing in complexity and visual demand, yet they need to stay simple enough to handle while driving. One common approach to solve this problem are multimodal interfaces, incorporating e.g. touch, speech, and mid-air gestures for the control of distinct features. This allows for an optimization of used cognitive resources and can relieve the driver of potential overload. We introduce a novel modality for in-car interaction: our system allows drivers to use facial expressions to control a music player. The results of a user study show that both implicit emotion recognition and explicit facial expressions are applicable for music control in cars. Subconscious emotion recognition could decrease distraction, while explicit expressions can be used as an alternative input modality. A simple smiling gesture showed good potential, e.g. to save favorite songs.
@InProceedings{braun18mumadj,
author = {Braun, Michael and V{\"o}lkel, Sarah Theres and Wiegand, Gesa and Puls, Thomas and Steidl, Daniel and Wei{\ss}, Yannick and Alt, Florian},
booktitle = {{Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia}},
title = {{The Smile is The New Like: Controlling Music with Facial Expressions to Minimize Driver Distraction}},
year = {2018},
address = {New York, NY, USA},
note = {braun18mumadj},
pages = {383--389},
publisher = {ACM},
series = {MUM 2018},
abstract = {The control of user interfaces while driving is a textbook example for driver distraction. Modern in-car interfaces are growing in complexity and visual demand, yet they need to stay simple enough to handle while driving. One common approach to solve this problem are multimodal interfaces, incorporating e.g. touch, speech, and mid-air gestures for the control of distinct features. This allows for an optimization of used cognitive resources and can relieve the driver of potential overload. We introduce a novel modality for in-car interaction: our system allows drivers to use facial expressions to control a music player. The results of a user study show that both implicit emotion recognition and explicit facial expressions are applicable for music control in cars. Subconscious emotion recognition could decrease distraction, while explicit expressions can be used as an alternative input modality. A simple smiling gesture showed good potential, e.g. to save favorite songs.},
acmid = {3289729},
doi = {10.1145/3282894.3289729},
isbn = {978-1-4503-6594-9},
keywords = {Affective Computing, Automotive User Interfaces, Driver Distraction, Face Recognition, Multimodal Interaction},
location = {Cairo, Egypt},
numpages = {7},
timestamp = {2018.11.28},
}
S. Prange, D. Buschek, and F. Alt. An Exploratory Study on Correlations of Hand Size and Mobile Touch Interactions. In Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia (MUM’18), ACM, New York, NY, USA, 2018, p. 279–283. doi:10.1145/3282894.3282924
[BibTeX] [Abstract] [PDF]
We report on an exploratory study investigating the relationship of users’ hand sizes and aspects of their mobile touch interactions. Estimating hand size from interaction could inform, for example, UI adaptation, occlusion-aware UIs, and biometrics. We recorded touch data from 62 participants performing six touch tasks on a smartphone. Our results reveal considerable correlations between hand size and aspects of touch interaction, both for tasks with unrestricted “natural” postures and restricted hand locations. We discuss implications for applications and ideas for future work.
@inproceedings{prange2018mum,
author = {Prange, Sarah and Buschek, Daniel and Alt, Florian},
title = {{An Exploratory Study on Correlations of Hand Size and Mobile Touch Interactions}},
booktitle = {{Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia}},
series = {MUM'18},
year = {2018},
pages = {279--283},
numpages = {5},
publisher = {ACM},
address = {New York, NY, USA},
location = {Cairo, Egypt},
isbn = {978-1-4503-6594-9},
doi = {10.1145/3282894.3282924},
acmid = {3282924},
url = {http://www.florian-alt.org/unibw/wp-content/publications/prange2018mum.pdf},
keywords = {Correlation, Hand Size, Scrolling, Swiping, Targeting, Touch},
abstract = {We report on an exploratory study investigating the relationship of users' hand sizes and aspects of their mobile touch interactions. Estimating hand size from interaction could inform, for example, UI adaptation, occlusion-aware UIs, and biometrics. We recorded touch data from 62 participants performing six touch tasks on a smartphone. Our results reveal considerable correlations between hand size and aspects of touch interaction, both for tasks with unrestricted "natural" postures and restricted hand locations. We discuss implications for applications and ideas for future work.},
note = {prange2018mum},
timestamp = {2018.11.25},
}
L. Mecke, K. Pfeuffer, S. Prange, and F. Alt. Open Sesame!: User Perception of Physical, Biometric, and Behavioural Authentication Concepts to Open Doors. In Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia (MUM’18), ACM, New York, NY, USA, 2018, p. 153–159. doi:10.1145/3282894.3282923
[BibTeX] [Abstract] [PDF]
In usable security (e.g., smartphone authentication), a lot of emphasis is put on low-effort authentication and access concepts. Yet, only very few approaches exist where such concepts are applied beyond digital devices. We investigate and explore seamless authentication systems at doors, where most currently used systems for seamless access rely on the use of tokens. In a Wizard-of-Oz study, we investigate three different authentication schemes, namely (1) key, (2) palm vein scanner and (3) gait-based authentication (compare Fig. 1). Most participants in our study (N=15) preferred the palm vein scanner, while ranking unlocking with a key and gait-based recognition second and third. Our results propose that recovery costs for a failed authentication attempt have an impact on user perception. Furthermore, while the participants appreciated seamless authentication via biometrics, they also valued the control they gain from the possession of a physical token.
@inproceedings{mecke2018mum,
author = {Mecke, Lukas and Pfeuffer, Ken and Prange, Sarah and Alt, Florian},
title = {{Open Sesame!: User Perception of Physical, Biometric, and Behavioural Authentication Concepts to Open Doors}},
booktitle = {{Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia}},
series = {MUM'18},
year = {2018},
pages = {153--159},
numpages = {7},
publisher = {ACM},
address = {New York, NY, USA},
location = {Cairo, Egypt},
isbn = {978-1-4503-6594-9},
doi = {10.1145/3282894.3282923},
acmid = {3282923},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mecke2018mum.pdf},
keywords = {(Behavioural) Biometrics, Authentication, User Perception, Wizard-of-Oz},
abstract = {In usable security (e.g., smartphone authentication), a lot of emphasis is put on low-effort authentication and access concepts. Yet, only very few approaches exist where such concepts are applied beyond digital devices. We investigate and explore seamless authentication systems at doors, where most currently used systems for seamless access rely on the use of tokens. In a Wizard-of-Oz study, we investigate three different authentication schemes, namely (1) key, (2) palm vein scanner and (3) gait-based authentication (compare Fig. 1). Most participants in our study (N=15) preferred the palm vein scanner, while ranking unlocking with a key and gait-based recognition second and third. Our results propose that recovery costs for a failed authentication attempt have an impact on user perception. Furthermore, while the participants appreciated seamless authentication via biometrics, they also valued the control they gain from the possession of a physical token.},
note = {mecke2018mum},
timestamp = {2018.11.25},
}
N. Müller, B. Eska, R. Schäffer, S. T. Völkel, M. Braun, G. Wiegand, and F. Alt. Arch’N’Smile: A Jump’N’Run Game Using Facial Expression Recognition Control For Entertaining Children During Car Journeys. In Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia (MUM’18), ACM, New York, NY, USA, 2018, p. 335–339. doi:10.1145/3282894.3282918
[BibTeX] [Abstract] [PDF]
Children can be a distraction to the driver during a car ride. With our work, we try to combine the possibility of facial expression recognition in the car with a game for children. The goal is that the parents can focus on the driving task while the child is busy and entertained. We conducted a study with children and parents in a real driving situation. It turned out that children can handle and enjoy games with facial recognition controls, which leads us to the conclusion that face recognition in the car as a entertaining system for children should be developed further to exploit its full potential.
@inproceedings{mueller2018mum,
author = {M\"{u}ller, Niklas and Eska, Bettina and Sch\"{a}ffer, Richard and V\"{o}lkel, Sarah Theres and Braun, Michael and Wiegand, Gesa and Alt, Florian},
title = {{Arch'N'Smile: A Jump'N'Run Game Using Facial Expression Recognition Control For Entertaining Children During Car Journeys}},
booktitle = {{Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia}},
series = {MUM'18},
year = {2018},
pages = {335--339},
numpages = {5},
publisher = {ACM},
address = {New York, NY, USA},
location = {Cairo, Egypt},
isbn = {978-1-4503-6594-9},
doi = {10.1145/3282894.3282918},
acmid = {3282918},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mueller2018mum.pdf},
keywords = {Children, Distraction, Driving, Entertainment, Face Recognition, Facial Expression, Game},
abstract = {Children can be a distraction to the driver during a car ride. With our work, we try to combine the possibility of facial expression recognition in the car with a game for children. The goal is that the parents can focus on the driving task while the child is busy and entertained. We conducted a study with children and parents in a real driving situation. It turned out that children can handle and enjoy games with facial recognition controls, which leads us to the conclusion that face recognition in the car as a entertaining system for children should be developed further to exploit its full potential.},
note = {mueller2018mum},
timestamp = {2018.11.25},
}
H. Drewes, M. Khamis, and F. Alt. Smooth Pursuit Target Speeds and Trajectories. In Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia (MUM’18), ACM, New York, NY, USA, 2018, p. 139–146. doi:10.1145/3282894.3282913
[BibTeX] [Abstract] [PDF]
In this paper we present an investigation of how the speed and trajectory of smooth pursuits targets impact on detection rates in gaze interfaces. Previous work optimized these values for the specific application for which smooth pursuit eye movements were employed. However, this may not always be possible. For example UI designers may want to minimize distraction caused by the stimulus, integrate it with a certain UI element (e.g., a button), or limit it to a certain area of the screen. In these cases an in-depth understanding of the interplay between speed, trajectory, and accuracy is required. To achieve this, we conducted a user study with 15 participants who had to follow targets with different speeds and on different trajectories using their gaze. We evaluated the data with respect to detectability. As a result, we obtained reasonable ranges for target speeds and demonstrate the effects of trajectory shapes. We show that slow moving targets are hard to detect by correlation and that introducing a delay improves the detection rate for fast moving targets. Our research is complemented by design rules which enable designers to implement better pursuit detectors and pursuit-based user interfaces.
@inproceedings{drewes2018mum,
author = {Drewes, Heiko and Khamis, Mohamed and Alt, Florian},
title = {{Smooth Pursuit Target Speeds and Trajectories}},
booktitle = {{Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia}},
series = {MUM'18},
year = {2018},
pages = {139--146},
numpages = {8},
publisher = {ACM},
address = {New York, NY, USA},
location = {Cairo, Egypt},
isbn = {978-1-4503-6594-9},
doi = {10.1145/3282894.3282913},
acmid = {3282913},
url = {http://www.florian-alt.org/unibw/wp-content/publications/drewes2018mum.pdf},
keywords = {Eye tracking, pursuit detection, smooth pursuits, trajectories},
abstract = {In this paper we present an investigation of how the speed and trajectory of smooth pursuits targets impact on detection rates in gaze interfaces. Previous work optimized these values for the specific application for which smooth pursuit eye movements were employed. However, this may not always be possible. For example UI designers may want to minimize distraction caused by the stimulus, integrate it with a certain UI element (e.g., a button), or limit it to a certain area of the screen. In these cases an in-depth understanding of the interplay between speed, trajectory, and accuracy is required. To achieve this, we conducted a user study with 15 participants who had to follow targets with different speeds and on different trajectories using their gaze. We evaluated the data with respect to detectability. As a result, we obtained reasonable ranges for target speeds and demonstrate the effects of trajectory shapes. We show that slow moving targets are hard to detect by correlation and that introducing a delay improves the detection rate for fast moving targets. Our research is complemented by design rules which enable designers to implement better pursuit detectors and pursuit-based user interfaces.},
note = {drewes2018mum},
timestamp = {2018.11.25},
}
C. Mai, T. Wiltzius, F. Alt, and H. Hußmann. Feeling alone in public: investigating the influence of spatial layout on users’ VR experience. In Proceedings of the 10th Nordic conference on human-computer interaction (NordiCHI ’18), Association for Computing Machinery, New York, NY, USA, 2018, p. 286–298. doi:10.1145/3240167.3240200
[BibTeX] [Abstract] [PDF]
We investigate how spatial layout in public environments like workplaces, fairs, or conferences influences a user’s VR experience. In particular, we compare environments in which an HMD user is (a) surrounded by other people, (b) physically separated by a barrier, or (c) in a separate room. In contrast to lab environments, users in public environments are affected by physical threats (for example, other people in the space running into them) but also cognitive threats (for example, not knowing, what happens in the real world), as known from research on proxemics or social facilitation. We contribute an extensive discussion of the factors influencing a user’s VR experience in public. Based on this we conducted a between-subject design user study (N=58) to understand the differences between the three environments. As a result, we present implications regarding (1) spatial layout, (2) behavior of the VR system operator, and (3) the VR experience that helps both HCI researchers as well as practitioners to enhance users’ VR experience in public environments.
@InProceedings{mai2018nordichi,
author = {Mai, Christian and Wiltzius, Tim and Alt, Florian and Hu{\ss}mann, Heinrich},
booktitle = {Proceedings of the 10th Nordic Conference on Human-Computer Interaction},
title = {Feeling Alone in Public: Investigating the Influence of Spatial Layout on Users' {VR} Experience},
year = {2018},
address = {New York, NY, USA},
note = {mai2018nordichi},
pages = {286--298},
publisher = {Association for Computing Machinery},
series = {NordiCHI '18},
abstract = {We investigate how spatial layout in public environments like workplaces, fairs, or conferences influences a user's VR experience. In particular, we compare environments in which an HMD user is (a) surrounded by other people, (b) physically separated by a barrier, or (c) in a separate room. In contrast to lab environments, users in public environments are affected by physical threats (for example, other people in the space running into them) but also cognitive threats (for example, not knowing, what happens in the real world), as known from research on proxemics or social facilitation. We contribute an extensive discussion of the factors influencing a user's VR experience in public. Based on this we conducted a between-subject design user study (N=58) to understand the differences between the three environments. As a result, we present implications regarding (1) spatial layout, (2) behavior of the VR system operator, and (3) the VR experience that helps both HCI researchers as well as practitioners to enhance users' VR experience in public environments.},
doi = {10.1145/3240167.3240200},
isbn = {9781450364379},
keywords = {head-mounted displays, public spaces, user experience, virtual reality},
location = {Oslo, Norway},
numpages = {13},
timestamp = {2018.10.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mai2018nordichi.pdf},
}
M. Braun, S. Weiser, B. Pfleging, and F. Alt. A Comparison of Emotion Elicitation Methods for Affective Driving Studies. In Proceedings of the 10th International Conference on Automotive User Interfaces and Interactive Vehicular Applications (AutomotiveUI ’18), ACM, New York, NY, USA, 2018, p. 77–81. doi:10.1145/3239092.3265945
[BibTeX] [Abstract] [PDF]
Advances in sensing technology enable the emotional state of car drivers to be captured and interfaces to be built that respond to these emotions. To evaluate such emotion-aware interfaces, researchers need to evoke certain emotional states within participants. Emotion elicitation in driving studies poses a challenge as the driving task can interfere with the elicitation task. Induced emotions also lose intensity with time and through secondary tasks. This is why we have analyzed different emotion elicitation techniques for their suitability in automotive research and compared the most promising approaches in a user study. We recommend using autobiographical recollection to induce emotions in driving studies, and suggest a way to prolong emotional states with music playback. We discuss experiences from a driving simulator study, including solutions for addressing potential privacy issues.
@InProceedings{braun2018autouiadj1,
author = {Braun, Michael and Weiser, Simon and Pfleging, Bastian and Alt, Florian},
booktitle = {{Proceedings of the 10th International Conference on Automotive User Interfaces and Interactive Vehicular Applications}},
title = {{A Comparison of Emotion Elicitation Methods for Affective Driving Studies}},
year = {2018},
address = {New York, NY, USA},
note = {braun2018autouiadj1},
pages = {77--81},
publisher = {ACM},
series = {AutomotiveUI '18},
abstract = {Advances in sensing technology enable the emotional state of car drivers to be captured and interfaces to be built that respond to these emotions. To evaluate such emotion-aware interfaces, researchers need to evoke certain emotional states within participants. Emotion elicitation in driving studies poses a challenge as the driving task can interfere with the elicitation task. Induced emotions also lose intensity with time and through secondary tasks. This is why we have analyzed different emotion elicitation techniques for their suitability in automotive research and compared the most promising approaches in a user study. We recommend using autobiographical recollection to induce emotions in driving studies, and suggest a way to prolong emotional states with music playback. We discuss experiences from a driving simulator study, including solutions for addressing potential privacy issues.},
acmid = {3265945},
doi = {10.1145/3239092.3265945},
isbn = {978-1-4503-5947-4},
keywords = {Affective Computing, Driving Studies, Emotion Elicitation},
numpages = {5},
timestamp = {2018.10.05},
}
M. Braun, A. Frison, S. T. Völkel, F. Alt, H. Hussmann, and A. Riener. Beyond Transportation: How to Keep Users Attached When They Are Neither Driving nor Owning Automated Cars?. In Proceedings of the 10th International Conference on Automotive User Interfaces and Interactive Vehicular Applications (AutomotiveUI ’18), ACM, New York, NY, USA, 2018, p. 175–180. doi:10.1145/3239092.3265963
[BibTeX] [Abstract] [PDF]
The way drivers relate to cars is likely bound to change with the rise of automated vehicles and new ownership models. However, personal relationships towards products are an important part of buying decisions. Car manufacturers thus need to provide novel bonding experiences for their future customers in order to stay competitive. We introduce a vehicle attachment model based on related work from other domains. In interviews with 16 car owners we verify the approach as promising and derive four attachment types by applying the model: interviewees’ personal attachments were grounded on either self-empowering reasons, memories with the car, increased status, or a loving friendship towards their car. We propose how to address the needs of these four attachment types as a first step towards emotionally irreplaceable automated and shared vehicles.
@InProceedings{braun2018autouiadj3,
author = {Braun, Michael and Frison, Anna-Katharina and V{\"o}lkel, Sarah Theres and Alt, Florian and Hussmann, Heinrich and Riener, Andreas},
booktitle = {{Proceedings of the 10th International Conference on Automotive User Interfaces and Interactive Vehicular Applications}},
title = {{Beyond Transportation: How to Keep Users Attached When They Are Neither Driving nor Owning Automated Cars?}},
year = {2018},
address = {New York, NY, USA},
pages = {175--180},
publisher = {ACM},
series = {AutomotiveUI '18},
abstract = {The way drivers relate to cars is likely bound to change with the rise of automated vehicles and new ownership models. However, personal relationships towards products are an important part of buying decisions. Car manufacturers thus need to provide novel bonding experiences for their future customers in order to stay competitive. We introduce a vehicle attachment model based on related work from other domains. In interviews with 16 car owners we verify the approach as promising and derive four attachment types by applying the model: interviewees' personal attachments were grounded on either self-empowering reasons, memories with the car, increased status, or a loving friendship towards their car. We propose how to address the needs of these four attachment types as a first step towards emotionally irreplaceable automated and shared vehicles.},
acmid = {3265963},
doi = {10.1145/3239092.3265963},
isbn = {978-1-4503-5947-4},
keywords = {Automated Cars, Car Sharing, Vehicle Attachment},
numpages = {6},
timestamp = {2018.10.05},
}
M. Braun, F. Roider, F. Alt, and T. Gross. Automotive Research in the Public Space: Towards Deployment-Based Prototypes For Real Users. In Proceedings of the 10th International Conference on Automotive User Interfaces and Interactive Vehicular Applications (AutomotiveUI ’18), ACM, New York, NY, USA, 2018, p. 181–185. doi:10.1145/3239092.3265964
[BibTeX] [Abstract] [PDF]
Many automotive user studies allow users to experience and evaluate interactive concepts. They are however often limited to small and specific groups of participants, such as students or experts. This might limit the generalizability of results for future users. A possible solution is to allow a large group of unbiased users to actively experience an interactive prototype and generate new ideas, but there is little experience about the realization and benefits of such an approach. We placed an interactive prototype in a public space and gathered objective and subjective data from 693 participants over the course of three months. We found a high variance in data quality and identified resulting restrictions for suitable research questions. This results in concrete requirements to hardware, software, and analytics, e.g. the need for assessing data quality, and give examples how this approach lets users explore a system and give first-contact feedback which differentiates highly from common in-depth expert analyses.
@inproceedings{braun2018autouiadj2,
  author    = {Braun, Michael and Roider, Florian and Alt, Florian and Gross, Tom},
  title     = {{Automotive Research in the Public Space: Towards Deployment-Based Prototypes For Real Users}},
  booktitle = {{Proceedings of the 10th International Conference on Automotive User Interfaces and Interactive Vehicular Applications}},
  series    = {AutomotiveUI '18},
  year      = {2018},
  pages     = {181--185},
  numpages  = {5},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/3239092.3265964},
  isbn      = {978-1-4503-5947-4},
  acmid     = {3265964},
  keywords  = {Automotive UI, Deployment, Prototypes, User Studies},
  abstract  = {Many automotive user studies allow users to experience and evaluate interactive concepts. They are however often limited to small and specific groups of participants, such as students or experts. This might limit the generalizability of results for future users. A possible solution is to allow a large group of unbiased users to actively experience an interactive prototype and generate new ideas, but there is little experience about the realization and benefits of such an approach. We placed an interactive prototype in a public space and gathered objective and subjective data from 693 participants over the course of three months. We found a high variance in data quality and identified resulting restrictions for suitable research questions. This results in concrete requirements to hardware, software, and analytics, e.g. the need for assessing data quality, and give examples how this approach lets users explore a system and give first-contact feedback which differentiates highly from common in-depth expert analyses.},
  timestamp = {2018.10.05},
}
M. Kattenbeck, M. A. Kilian, M. Ferstl, F. Alt, and B. Ludwig. Airbot: Using a Work Flow Model for Proactive Assistance in Public Spaces. In Proceedings of the 20th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct (MobileHCI ’18), ACM, New York, NY, USA, 2018, p. 213–220. doi:10.1145/3236112.3236142
[BibTeX] [PDF]
@inproceedings{kattenbeck2018mobilehciadj,
  author    = {Kattenbeck, Markus and Kilian, Melanie A. and Ferstl, Matthias and Alt, Florian and Ludwig, Bernd},
  title     = {{Airbot: Using a Work Flow Model for Proactive Assistance in Public Spaces}},
  booktitle = {{Proceedings of the 20th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct}},
  series    = {MobileHCI '18},
  location  = {Barcelona, Spain},
  year      = {2018},
  pages     = {213--220},
  numpages  = {8},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/3236112.3236142},
  isbn      = {978-1-4503-5941-2},
  acmid     = {3236142},
  keywords  = {assistance system, cooperative problem solving, human-computer interaction, mobile information needs},
  timestamp = {2018.10.01},
}
R. Poguntke, C. Tasci, O. Korhonen, F. Alt, and S. Schneegass. AVotar: Exploring Personalized Avatars for Mobile Interaction with Public Displays. In Proceedings of the 20th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct (MobileHCI ’18), ACM, New York, NY, USA, 2018, p. 1–8. doi:10.1145/3236112.3236113
[BibTeX] [Abstract] [PDF]
Engaging users with public displays has been a major challenge in public display research. Interactive displays often suffer from being ignored by potential users. Research showed that user representations are a valid way to partially address this challenge, e.g., by attracting attention, conveying interactivity, and serving as entry points to gestures and touch interaction. We believe that user representations, particularly personalized avatars, could further increase the attractiveness of public displays, if carefully designed. In this work, we provide first insights on how such avatars can be designed and which properties are important for users. In particular, we present AVotar, a voting application for mobiles that lets users design avatars being utilized to represent them. In an user study we found that users appreciate high degrees of freedom in customization and focus on expressive facial features. Finally, we discuss the findings yielding useful implications for designers of future public display applications employing avatars.
@inproceedings{poguntke2018mobilehciadj,
  author    = {Poguntke, Romina and Tasci, Cagri and Korhonen, Olli and Alt, Florian and Schneegass, Stefan},
  title     = {{AVotar: Exploring Personalized Avatars for Mobile Interaction with Public Displays}},
  booktitle = {{Proceedings of the 20th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct}},
  series    = {MobileHCI '18},
  location  = {Barcelona, Spain},
  year      = {2018},
  pages     = {1--8},
  numpages  = {8},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/3236112.3236113},
  isbn      = {978-1-4503-5941-2},
  acmid     = {3236113},
  keywords  = {avatars, engagement, personalization, public displays, user representation},
  abstract  = {Engaging users with public displays has been a major challenge in public display research. Interactive displays often suffer from being ignored by potential users. Research showed that user representations are a valid way to partially address this challenge, e.g., by attracting attention, conveying interactivity, and serving as entry points to gestures and touch interaction. We believe that user representations, particularly personalized avatars, could further increase the attractiveness of public displays, if carefully designed. In this work, we provide first insights on how such avatars can be designed and which properties are important for users. In particular, we present AVotar, a voting application for mobiles that lets users design avatars being utilized to represent them. In an user study we found that users appreciate high degrees of freedom in customization and focus on expressive facial features. Finally, we discuss the findings yielding useful implications for designers of future public display applications employing avatars.},
  timestamp = {2018.10.01},
}
M. Khamis, L. Trotter, V. Mäkelä, E. von Zezschwitz, J. Le, A. Bulling, and F. Alt. CueAuth: Comparing Touch, Mid-Air Gestures, and Gaze for Cue-based Authentication on Situated Displays. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT), vol. 2, iss. 4, p. 174:1–174:22, 2018. doi:10.1145/3287052
[BibTeX] [Abstract] [PDF]
Secure authentication on situated displays (e.g., to access sensitive information or to make purchases) is becoming increasingly important. A promising approach to resist shoulder surfing attacks is to employ cues that users respond to while authenticating; this overwhelms observers by requiring them to observe both the cue itself as well as users’ response to the cue. Although previous work proposed a variety of modalities, such as gaze and mid-air gestures, to further improve security, an understanding of how they compare with regard to usability and security is still missing as of today. In this paper, we rigorously compare modalities for cue-based authentication on situated displays. In particular, we provide the first comparison between touch, mid-air gestures, and calibration-free gaze using a state-of-the-art authentication concept. In two in-depth user studies (N=20, N=17) we found that the choice of touch or gaze presents a clear tradeoff between usability and security. For example, while gaze input is more secure, it is also more demanding and requires longer authentication times. Mid-air gestures are slightly slower and more secure than touch but users hesitate to use them in public. We conclude with three significant design implications for authentication using touch, mid-air gestures, and gaze and discuss how the choice of modality creates opportunities and challenges for improved authentication in public.
@Article{Khamis2018,
author = {Khamis, Mohamed and Trotter, Ludwig and M{\"a}kel{\"a}, Ville and von Zezschwitz, Emanuel and Le, Jens and Bulling, Andreas and Alt, Florian},
journal = {{Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT)}},
title = {{CueAuth: Comparing Touch, Mid-Air Gestures, and Gaze for Cue-based Authentication on Situated Displays}},
year = {2018},
issn = {2474-9567},
month = dec,
note = {khamis2018imwut},
number = {4},
pages = {174:1--174:22},
volume = {2},
abstract = {Secure authentication on situated displays (e.g., to access sensitive information or to make purchases) is becoming increasingly important. A promising approach to resist shoulder surfing attacks is to employ cues that users respond to while authenticating; this overwhelms observers by requiring them to observe both the cue itself as well as users' response to the cue. Although previous work proposed a variety of modalities, such as gaze and mid-air gestures, to further improve security, an understanding of how they compare with regard to usability and security is still missing as of today. In this paper, we rigorously compare modalities for cue-based authentication on situated displays. In particular, we provide the first comparison between touch, mid-air gestures, and calibration-free gaze using a state-of-the-art authentication concept. In two in-depth user studies (N=20, N=17) we found that the choice of touch or gaze presents a clear tradeoff between usability and security. For example, while gaze input is more secure, it is also more demanding and requires longer authentication times. Mid-air gestures are slightly slower and more secure than touch but users hesitate to use them in public. We conclude with three significant design implications for authentication using touch, mid-air gestures, and gaze and discuss how the choice of modality creates opportunities and challenges for improved authentication in public.},
acmid = {3287052},
address = {New York, NY, USA},
articleno = {174},
doi = {10.1145/3287052},
issue_date = {December 2018},
keywords = {Eye Tracking, Privacy, Public Displays, Pursuits, SwiPIN},
numpages = {22},
publisher = {ACM},
timestamp = {2018.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2018imwut.pdf},
}
M. Khamis, A. Kienle, F. Alt, and A. Bulling. GazeDrone: Mobile Eye-Based Interaction in Public Space Without Augmenting the User. In 4th ACM Workshop on Micro Aerial Vehicle Networks, Systems, and Applications (DroNet ’18), ACM, New York, NY, USA, 2018, p. 66–71. doi:10.1145/3213526.3213539
[BibTeX] [Abstract] [PDF]
Gaze interaction holds a lot of promise for seamless human-computer interaction. At the same time, current wearable mobile eye trackers require user augmentation that negatively impacts natural user behavior while remote trackers require users to position themselves within a confined tracking range. We present GazeDrone, the first system that combines a camera-equipped aerial drone with a computational method to detect sidelong glances for spontaneous (calibration-free) gaze-based interaction with surrounding pervasive systems (e.g., public displays). GazeDrone does not require augmenting each user with on-body sensors and allows interaction from arbitrary positions, even while moving. We demonstrate that drone-supported gaze interaction is feasible and accurate for certain movement types. It is well-perceived by users, in particular while interacting from a fixed position as well as while moving orthogonally or diagonally to a display. We present design implications and discuss opportunities and challenges for drone-supported gaze interaction in public.
@InProceedings{khamis2018dronet,
author = {Mohamed Khamis and Anna Kienle and Florian Alt and Andreas Bulling},
booktitle = {{4th ACM Workshop on Micro Aerial Vehicle Networks, Systems, and Applications (DroNet '18)}},
title = {{GazeDrone: Mobile Eye-Based Interaction in Public Space Without Augmenting the User}},
year = {2018},
address = {New York, NY, USA},
month = {June},
note = {khamis2018dronet},
pages = {66--71},
publisher = {ACM},
series = {DroNet'18},
abstract = {Gaze interaction holds a lot of promise for seamless human-computer interaction. At the same time, current wearable mobile eye trackers require user augmentation that negatively impacts natural user behavior while remote trackers require users to position themselves within a confined tracking range. We present GazeDrone, the first system that combines a camera-equipped aerial drone with a computational method to detect sidelong glances for spontaneous (calibration-free) gaze-based interaction with surrounding pervasive systems (e.g., public displays). GazeDrone does not require augmenting each user with on-body sensors and allows interaction from arbitrary positions, even while moving. We demonstrate that drone-supported gaze interaction is feasible and accurate for certain movement types. It is well-perceived by users, in particular while interacting from a fixed position as well as while moving orthogonally or diagonally to a display. We present design implications and discuss opportunities and challenges for drone-supported gaze interaction in public.},
doi = {10.1145/3213526.3213539},
keywords = {Active eye tracking, drones, gaze interaction, UAV.},
timestamp = {2018.09.05},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2018dronet.pdf},
}
M. Khamis, F. Alt, and A. Bulling. The Past, Present, and Future of Gaze-enabled Handheld Mobile Devices: Survey and Lessons Learned. In Proceedings of the 20th International Conference on Human-Computer Interaction with Mobile Devices and Services (MobileHCI ’18), ACM, New York, NY, USA, 2018, p. 38:1–38:17. doi:10.1145/3229434.3229452
[BibTeX] [Abstract] [PDF]
While first-generation mobile gaze interfaces required special-purpose hardware, recent advances in computational gaze estimation and the availability of sensor-rich and powerful devices is finally fulfilling the promise of pervasive eye tracking and eye-based interaction on off-the-shelf mobile devices. This work provides the first holistic view on the past, present, and future of eye tracking on handheld mobile devices. To this end, we discuss how research developed from building hardware prototypes, to accurate gaze estimation on unmodified smartphones and tablets. We then discuss implications by laying out 1) novel opportunities, including pervasive advertising and conducting in-the-wild eye tracking studies on handhelds, and 2) new challenges that require further research, such as visibility of the user’s eyes, lighting conditions, and privacy implications. We discuss how these developments shape MobileHCI research in the future, possibly the next 20 years.
@inproceedings{khamis2018mobilehci,
  author    = {Khamis, Mohamed and Alt, Florian and Bulling, Andreas},
  title     = {{The Past, Present, and Future of Gaze-enabled Handheld Mobile Devices: Survey and Lessons Learned}},
  booktitle = {{Proceedings of the 20th International Conference on Human-Computer Interaction with Mobile Devices and Services}},
  series    = {MobileHCI '18},
  location  = {Barcelona, Spain},
  year      = {2018},
  pages     = {38:1--38:17},
  articleno = {38},
  numpages  = {17},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/3229434.3229452},
  isbn      = {978-1-4503-5898-9},
  acmid     = {3229452},
  keywords  = {eye tracking, gaze estimation, gaze interaction, mobile devices, smartphones, tablets},
  abstract  = {While first-generation mobile gaze interfaces required special-purpose hardware, recent advances in computational gaze estimation and the availability of sensor-rich and powerful devices is finally fulfilling the promise of pervasive eye tracking and eye-based interaction on off-the-shelf mobile devices. This work provides the first holistic view on the past, present, and future of eye tracking on handheld mobile devices. To this end, we discuss how research developed from building hardware prototypes, to accurate gaze estimation on unmodified smartphones and tablets. We then discuss implications by laying out 1) novel opportunities, including pervasive advertising and conducting in-the-wild eye tracking studies on handhelds, and 2) new challenges that require further research, such as visibility of the user's eyes, lighting conditions, and privacy implications. We discuss how these developments shape MobileHCI research in the future, possibly the next 20 years.},
  note      = {khamis2018mobilehci},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2018mobilehci.pdf},
  timestamp = {2018.09.01},
}
L. Mecke, S. Prange, D. Buschek, M. Khamis, M. Hassib, and F. Alt. “Outsourcing” Security: Supporting People to Support Older Adults. In Proceedings of the Mobile HCI ’18 Workshop on Mobile Privacy and Security for an Aging Population, 2018.
[BibTeX] [Abstract] [PDF]
Older adults often rely on the support of trusted individuals (e.g., younger family members) when performing complex tasks on their mobile devices, such as configuring privacy settings. However, a prominent problem is that systems are designed with the intention of a single “main user” using them, with little to no support for cases where the user would like to get external help from others. In this work, we provide anecdotal evidence of problems faced by supporters who try to help older adults in privacy and security related tasks. We outline multiple suggestions for future work in this area, and discuss how systems can support people who support older adults.
@InProceedings{mecke2018mobilehciadj,
author = {Lukas Mecke AND Sarah Prange AND Daniel Buschek AND Mohamed Khamis AND Mariam Hassib AND Florian Alt},
title = {{``Outsourcing'' Security: Supporting People to Support Older Adults}},
booktitle = {{Proceedings of the Mobile HCI '18 Workshop on Mobile Privacy and Security for an Aging Population}},
year = {2018},
abstract = {Older adults often rely on the support of trusted individuals (e.g., younger family members) when performing complex tasks on their mobile devices, such as configuring privacy settings. However, a prominent problem is that systems are designed with the intention of a single ``main user'' using them, with little to no support for cases where the user would like to get external help from others. In this work, we provide anecdotal evidence of problems faced by supporters who try to help older adults in privacy and security related tasks. We outline multiple suggestions for future work in this area, and discuss how systems can support people who support older adults.},
owner = {florian},
timestamp = {2018.08.31},
}
T. Mattusch, M. Mirzamohammad, M. Khamis, A. Bulling, and F. Alt. Hidden Pursuits: Evaluating Gaze-selection via Pursuits when the Stimuli’s Trajectory is Partially Hidden. In Proceedings of the 2018 ACM Symposium on Eye Tracking Research & Applications (ETRA ’18), ACM, New York, NY, USA, 2018, p. 27:1–27:5. doi:10.1145/3204493.3204569
[BibTeX] [Abstract] [PDF]
The idea behind gaze interaction using Pursuits is to leverage the human’s smooth pursuit eye movements performed when following moving targets. However, humans can also anticipate where a moving target would reappear if it temporarily hides from their view. In this work, we investigate how well users can select targets using Pursuits in cases where the target’s trajectory is partially invisible (HiddenPursuits): e.g., can users select a moving target that temporarily hides behind another object? Although HiddenPursuits was not studied in the context of interaction before, understanding how well users can perform HiddenPursuits presents numerous opportunities, particularly for small interfaces where a target’s trajectory can cover area outside of the screen. We found that users can still select targets quickly via Pursuits even if their trajectory is up to 50% hidden, and at the expense of longer selection times when the hidden portion is larger. We discuss how gaze-based interfaces can leverage HiddenPursuits for an improved user experience.
@inproceedings{mattusch2018etra,
  author    = {Mattusch, Thomas and Mirzamohammad, Mahsa and Khamis, Mohamed and Bulling, Andreas and Alt, Florian},
  title     = {{Hidden Pursuits: Evaluating Gaze-selection via Pursuits when the Stimuli's Trajectory is Partially Hidden}},
  booktitle = {{Proceedings of the 2018 ACM Symposium on Eye Tracking Research \& Applications}},
  series    = {ETRA '18},
  location  = {Warsaw, Poland},
  year      = {2018},
  pages     = {27:1--27:5},
  articleno = {27},
  numpages  = {5},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/3204493.3204569},
  isbn      = {978-1-4503-5706-7},
  acmid     = {3204569},
  keywords  = {displays, hidden trajectory, motion correlation, smooth pursuit},
  abstract  = {The idea behind gaze interaction using Pursuits is to leverage the human's smooth pursuit eye movements performed when following moving targets. However, humans can also anticipate where a moving target would reappear if it temporarily hides from their view. In this work, we investigate how well users can select targets using Pursuits in cases where the target's trajectory is partially invisible (HiddenPursuits): e.g., can users select a moving target that temporarily hides behind another object? Although HiddenPursuits was not studied in the context of interaction before, understanding how well users can perform HiddenPursuits presents numerous opportunities, particularly for small interfaces where a target's trajectory can cover area outside of the screen. We found that users can still select targets quickly via Pursuits even if their trajectory is up to 50% hidden, and at the expense of longer selection times when the hidden portion is larger. We discuss how gaze-based interfaces can leverage HiddenPursuits for an improved user experience.},
  note      = {mattusch2018etra},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/mattusch2018etra.pdf},
  timestamp = {2018.06.06},
}
F. Alt, S. Geiger, and W. Höhl. ShapelineGuide: Teaching Mid-Air Gestures for Large Interactive Displays. In Proceedings of the 7th ACM International Symposium on Pervasive Displays (PerDis ’18), ACM, New York, NY, USA, 2018, p. 3:1–3:8. doi:10.1145/3205873.3205887
[BibTeX] [Abstract] [PDF]
We present ShapelineGuide, a dynamic visual guide that supports users of large interactive displays while performing mid-air gestures. Today, we find many examples of large displays supporting interaction through gestures performed in Mid-air. Yet, approaches that support users in learning and executing these gestures are still scarce. Prior approaches require complex setups, are targeted towards the use of 2D gestures, or focus on the initial gestures only. Our work extends state-of-the-art by presenting a feedforward system that provides users constant updates on their gestures. We report on the design and implementation of the approach and present findings from an evaluation of the system in a lab study (N=44), focusing on learning performance, accuracy, and errors. We found that ShapelineGuide helps users with regard to learning the gestures as well as decreases execution times and cognitive load.
@InProceedings{alt2018perdis,
author = {Alt, Florian and Geiger, Sabrina and H{\"o}hl, Wolfgang},
booktitle = {{Proceedings of the 7th ACM International Symposium on Pervasive Displays}},
title = {{ShapelineGuide: Teaching Mid-Air Gestures for Large Interactive Displays}},
year = {2018},
address = {New York, NY, USA},
note = {alt2018perdis},
pages = {3:1--3:8},
publisher = {ACM},
series = {PerDis '18},
abstract = {We present ShapelineGuide, a dynamic visual guide that supports users of large interactive displays while performing mid-air gestures. Today, we find many examples of large displays supporting interaction through gestures performed in Mid-air. Yet, approaches that support users in learning and executing these gestures are still scarce. Prior approaches require complex setups, are targeted towards the use of 2D gestures, or focus on the initial gestures only. Our work extends state-of-the-art by presenting a feedforward system that provides users constant updates on their gestures. We report on the design and implementation of the approach and present findings from an evaluation of the system in a lab study (N=44), focusing on learning performance, accuracy, and errors. We found that ShapelineGuide helps users with regard to learning the gestures as well as decreases execution times and cognitive load.},
acmid = {3205887},
articleno = {3},
doi = {10.1145/3205873.3205887},
isbn = {978-1-4503-5765-4},
keywords = {Displays, Dynamic Guides, Feedback, Feedforward, Gestures},
location = {Munich, Germany},
numpages = {8},
timestamp = {2018.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2018perdis.pdf},
}
A. Colley, J. Häkkilä, M. Forsman, B. Pfleging, and F. Alt. Car Exterior Surface Displays: Exploration in a Real-World Context. In Proceedings of the 7th ACM International Symposium on Pervasive Displays (PerDis ’18), ACM, New York, NY, USA, 2018, p. 7:1–7:8. doi:10.1145/3205873.3205880
[BibTeX] [Abstract] [PDF]
Current changes in the automotive industry towards autonomous vehicles will spur wide ranging changes in the roles of cars in urban environments. When combined with advances in display technology, this creates potential for the outer surfaces of cars to act as public displays. We present a real-world in context study, where participants ideated on a variety of different types of informative content, displayed on or around vehicles. Our study approach utilized handheld projection to create visualization experiences suggestive of the capabilities of future display technologies. The salient findings show that ideas related to the car and the driving function, such as parking, warning pedestrians and changing the vehicles aesthetic appearance, were appreciated. In contrast, ideas where the vehicle formed part of a smart urban infrastructure, such as guiding pedestrians or acting as a public display caused diverse opinions. In particular, concepts where personalized content was shown were disliked for reasons related to privacy and feeling like ‘big brother’ is watching.
@InProceedings{colley2018perdis,
author = {Colley, Ashley and H{\"a}kkil{\"a}, Jonna and Forsman, Meri-Tuulia and Pfleging, Bastian and Alt, Florian},
booktitle = {{Proceedings of the 7th ACM International Symposium on Pervasive Displays}},
title = {{Car Exterior Surface Displays: Exploration in a Real-World Context}},
year = {2018},
address = {New York, NY, USA},
note = {colley2018perdis},
pages = {7:1--7:8},
publisher = {ACM},
series = {PerDis '18},
abstract = {Current changes in the automotive industry towards autonomous vehicles will spur wide ranging changes in the roles of cars in urban environments. When combined with advances in display technology, this creates potential for the outer surfaces of cars to act as public displays. We present a real-world in context study, where participants ideated on a variety of different types of informative content, displayed on or around vehicles. Our study approach utilized handheld projection to create visualization experiences suggestive of the capabilities of future display technologies. The salient findings show that ideas related to the car and the driving function, such as parking, warning pedestrians and changing the vehicles aesthetic appearance, were appreciated. In contrast, ideas where the vehicle formed part of a smart urban infrastructure, such as guiding pedestrians or acting as a public display caused diverse opinions. In particular, concepts where personalized content was shown were disliked for reasons related to privacy and feeling like 'big brother' is watching.},
acmid = {3205880},
articleno = {7},
doi = {10.1145/3205873.3205880},
isbn = {978-1-4503-5765-4},
keywords = {Automotive UI, interactive surfaces, pedestrian guidance, pervasive navigation, projected AR, public displays, spatial augmented reality},
location = {Munich, Germany},
numpages = {8},
timestamp = {2018.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/colley2018perdis.pdf},
}
M. Khamis, C. Oechsner, F. Alt, and A. Bulling. VRpursuits: Interaction in Virtual Reality Using Smooth Pursuit Eye Movements. In Proceedings of the 2018 International Conference on Advanced Visual Interfaces (AVI ’18), ACM, New York, NY, USA, 2018, p. 18:1–18:8. doi:10.1145/3206505.3206522
[BibTeX] [Abstract] [PDF]
Gaze-based interaction using smooth pursuit eye movements (Pursuits) is attractive given that it is intuitive and overcomes the Midas touch problem. At the same time, eye tracking is becoming increasingly popular for VR applications. While Pursuits was shown to be effective in several interaction contexts, it was never explored in-depth for VR before. In a user study (N=26), we investigated how parameters that are specific to VR settings influence the performance of Pursuits. For example, we found that Pursuits is robust against different sizes of virtual 3D targets. However performance improves when the trajectory size (e.g., radius) is larger, particularly if the user is walking while interacting. While walking, selecting moving targets via Pursuits is generally feasible albeit less accurate than when stationary. Finally, we discuss the implications of these findings and the potential of smooth pursuits for interaction in VR by demonstrating two sample use cases: 1) gaze-based authentication in VR, and 2) a space meteors shooting game.
@InProceedings{khamis2018avi,
author = {Khamis, Mohamed and Oechsner, Carl and Alt, Florian and Bulling, Andreas},
booktitle = {Proceedings of the 2018 International Conference on Advanced Visual Interfaces},
title = {{VRpursuits}: Interaction in Virtual Reality Using Smooth Pursuit Eye Movements},
year = {2018},
address = {New York, NY, USA},
note = {khamis2018avi},
pages = {18:1--18:8},
publisher = {ACM},
series = {AVI '18},
abstract = {Gaze-based interaction using smooth pursuit eye movements (Pursuits) is attractive given that it is intuitive and overcomes the Midas touch problem. At the same time, eye tracking is becoming increasingly popular for VR applications. While Pursuits was shown to be effective in several interaction contexts, it was never explored in-depth for VR before. In a user study (N=26), we investigated how parameters that are specific to VR settings influence the performance of Pursuits. For example, we found that Pursuits is robust against different sizes of virtual 3D targets. However performance improves when the trajectory size (e.g., radius) is larger, particularly if the user is walking while interacting. While walking, selecting moving targets via Pursuits is generally feasible albeit less accurate than when stationary. Finally, we discuss the implications of these findings and the potential of smooth pursuits for interaction in VR by demonstrating two sample use cases: 1) gaze-based authentication in VR, and 2) a space meteors shooting game.},
acmid = {3206522},
articleno = {18},
doi = {10.1145/3206505.3206522},
isbn = {978-1-4503-5616-9},
keywords = {eye tracking, gaze interaction, pursuits, virtual reality},
location = {Castiglione della Pescaia, Grosseto, Italy},
numpages = {8},
timestamp = {2018.05.31},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2018avi.pdf},
}
V. Mäkelä, M. Khamis, L. Mecke, J. James, M. Turunen, and F. Alt. Pocket Transfers: Interaction Techniques for Transferring Content from Situated Displays to Mobile Devices. In Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems (CHI ’18), ACM, New York, NY, USA, 2018, p. 135:1–135:13. doi:10.1145/3173574.3173709
[BibTeX] [Abstract] [PDF]
We present Pocket Transfers: interaction techniques that allow users to transfer content from situated displays to a personal mobile device while keeping the device in a pocket or bag. Existing content transfer solutions require direct manipulation of the mobile device, making interaction slower and less flexible. Our introduced techniques employ touch, mid-air gestures, gaze, and a multimodal combination of gaze and mid-air gestures. We evaluated the techniques in a novel user study (N=20), where we considered dynamic scenarios where the user approaches the display, completes the task, and leaves. We show that all pocket transfer techniques are fast and seen as highly convenient. Mid-air gestures are the most efficient touchless method for transferring a single item, while the multimodal method is the fastest touchless method when multiple items are transferred. We provide guidelines to help researchers and practitioners choose the most suitable content transfer techniques for their systems.
@InProceedings{makela2018chi,
author = {M\"{a}kel\"{a}, Ville and Khamis, Mohamed and Mecke, Lukas and James, Jobin and Turunen, Markku and Alt, Florian},
booktitle = {Proceedings of the 2018 {CHI} Conference on Human Factors in Computing Systems},
title = {{Pocket Transfers}: Interaction Techniques for Transferring Content from Situated Displays to Mobile Devices},
year = {2018},
address = {New York, NY, USA},
note = {makela2018chi},
pages = {135:1--135:13},
publisher = {ACM},
series = {CHI '18},
abstract = {We present Pocket Transfers: interaction techniques that allow users to transfer content from situated displays to a personal mobile device while keeping the device in a pocket or bag. Existing content transfer solutions require direct manipulation of the mobile device, making inter-action slower and less flexible. Our introduced tech-niques employ touch, mid-air gestures, gaze, and a mul-timodal combination of gaze and mid-air gestures. We evaluated the techniques in a novel user study (N=20), where we considered dynamic scenarios where the user approaches the display, completes the task, and leaves. We show that all pocket transfer techniques are fast and seen as highly convenient. Mid-air gestures are the most efficient touchless method for transferring a single item, while the multimodal method is the fastest touchless method when multiple items are transferred. We provide guidelines to help researchers and practitioners choose the most suitable content transfer techniques for their systems.},
acmid = {3173709},
articleno = {135},
doi = {10.1145/3173574.3173709},
isbn = {978-1-4503-5620-6},
keywords = {content transfer, cross-device interaction, gaze, mid-air gestures, multimodal, public displays, ubiquitous computing},
numpages = {13},
timestamp = {2018.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/makela2018chi.pdf},
}
D. Buschek, B. Bisinger, and F. Alt. ResearchIME: A Mobile Keyboard Application for Studying Free Typing Behaviour in the Wild. In Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems (CHI ’18), ACM, New York, NY, USA, 2018, p. 255:1–255:14. doi:10.1145/3173574.3173829
[BibTeX] [Abstract] [PDF]
We present a data logging concept, tool, and analyses to facilitate studies of everyday mobile touch keyboard use and free typing behaviour: 1) We propose a filtering concept to log typing without recording readable text and assess reactions to filters with a survey (N=349). 2) We release an Android keyboard app and backend that implement this concept. 3) Based on a three-week field study (N=30), we present the first analyses of keyboard use and typing biometrics on such free text typing data in the wild, including speed, postures, apps, autocorrection, and word suggestions. We conclude that research on mobile keyboards benefits from observing free typing beyond the lab and discuss ideas for further studies.
@InProceedings{buschek2018chi2,
author = {Buschek, Daniel and Bisinger, Benjamin and Alt, Florian},
title = {{ResearchIME}: A Mobile Keyboard Application for Studying Free Typing Behaviour in the Wild},
booktitle = {Proceedings of the 2018 {CHI} Conference on Human Factors in Computing Systems},
year = {2018},
series = {CHI '18},
pages = {255:1--255:14},
address = {New York, NY, USA},
publisher = {ACM},
note = {buschek2018chi2},
abstract = {We present a data logging concept, tool, and analyses to facilitatestudies of everyday mobile touch keyboard use andfree typing behaviour: 1) We propose a filtering concept to logtyping without recording readable text and assess reactions tofilters with a survey (N=349). 2) We release an Android keyboardapp and backend that implement this concept. 3) Basedon a three-week field study (N=30), we present the first analysesof keyboard use and typing biometrics on such free texttyping data in the wild, including speed, postures, apps, autocorrection, and word suggestions. We conclude that researchon mobile keyboards benefits from observing free typing beyondthe lab and discuss ideas for further studies.},
acmid = {3173829},
articleno = {255},
doi = {10.1145/3173574.3173829},
isbn = {978-1-4503-5620-6},
keywords = {biometrics, data logging, touch keyboard, typing behaviour},
numpages = {14},
timestamp = {2018.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2018chi2.pdf},
}
D. Buschek, B. Roppelt, and F. Alt. Extending keyboard shortcuts with arm and wrist rotation gestures. In Proceedings of the 2018 chi conference on human factors in computing systems, Association for Computing Machinery, New York, NY, USA, 2018, p. 1–12.
[BibTeX] [Abstract] [PDF]
We propose and evaluate a novel interaction technique to enhance physical keyboard shortcuts with arm and wrist rotation gestures, performed during keypresses: rolling the wrist, rotating the arm/wrist, and lifting it. This extends the set of shortcuts from key combinations (e.g. ctrl + v) to combinations of key(s) and gesture (e.g. v + roll left) and enables continuous control. We implement this approach for isolated single keypresses, using inertial sensors of a smartwatch. We investigate key aspects in three studies: 1) rotation flexibility per keystroke finger, 2) rotation control, and 3) user-defined gesture shortcuts. As a use case, we employ our technique in a painting application and assess user experience. Overall, results show that arm and wrist rotations during keystrokes can be used for interaction, yet challenges remain for integration into practical applications. We discuss recommendations for applications and ideas for future research.
@InProceedings{buschek2018chi1,
author = {Buschek, Daniel and Roppelt, Bianka and Alt, Florian},
booktitle = {Proceedings of the 2018 {CHI} Conference on Human Factors in Computing Systems},
title = {Extending Keyboard Shortcuts with Arm and Wrist Rotation Gestures},
year = {2018},
address = {New York, NY, USA},
note = {buschek2018chi1},
pages = {1--12},
publisher = {Association for Computing Machinery},
series = {CHI '18},
abstract = {We propose and evaluate a novel interaction technique to enhance physical keyboard
shortcuts with arm and wrist rotation gestures, performed during keypresses: rolling
the wrist, rotating the arm/wrist, and lifting it. This extends the set of shortcuts
from key combinations (e.g. ctrl + v) to combinations of key(s) and gesture (e.g.
v + roll left) and enables continuous control. We implement this approach for isolated
single keypresses, using inertial sensors of a smartwatch. We investigate key aspects
in three studies: 1) rotation flexibility per keystroke finger, 2) rotation control,
and 3) user-defined gesture shortcuts. As a use case, we employ our technique in a
painting application and assess user experience. Overall, results show that arm and
wrist rotations during keystrokes can be used for interaction, yet challenges remain
for integration into practical applications. We discuss recommendations for applications
and ideas for future research.},
isbn = {9781450356206},
numpages = {12},
timestamp = {2018.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2018chi1.pdf},
}
M. Khamis, C. Becker, A. Bulling, and F. Alt. Which One is Me?: Identifying Oneself on Public Displays. In Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems (CHI ’18), ACM, New York, NY, USA, 2018, p. 287:1–287:12. doi:10.1145/3173574.3173861
[BibTeX] [Abstract] [PDF]
While user representations are extensively used on public displays, it remains unclear how well users can recognize their own representation among those of surrounding users. We study the most widely used representations: abstract objects, skeletons, silhouettes and mirrors. In a prestudy (N=12), we identify five strategies that users follow to recognize themselves on public displays. In a second study (N=19), we quantify the users’ recognition time and accuracy with respect to each representation type. Our findings suggest that there is a significant effect of (1) the representation type, (2) the strategies performed by users, and (3) the combination of both on recognition time and accuracy. We discuss the suitability of each representation for different settings and provide specific recommendations as to how user representations should be applied in multi-user scenarios. These recommendations guide practitioners and researchers in selecting the representation that optimizes the most for the deployment’s requirements, and for the user strategies that are feasible in that environment.
@InProceedings{khamis2018chi1,
author = {Khamis, Mohamed and Becker, Christian and Bulling, Andreas and Alt, Florian},
booktitle = {Proceedings of the 2018 {CHI} Conference on Human Factors in Computing Systems},
title = {Which One is Me?: Identifying Oneself on Public Displays},
year = {2018},
address = {New York, NY, USA},
note = {khamis2018chi1},
pages = {287:1--287:12},
publisher = {ACM},
series = {CHI '18},
abstract = {While user representations are extensively used on public displays, it remains unclear how well users can recognize their own representation among those of surrounding users. We study the most widely used representations: abstract objects, skeletons, silhouettes and mirrors. In a prestudy (N=12), we identify five strategies that users follow to recognize themselves on public displays. In a second study (N=19), we quantify the users' recognition time and accuracy with respect to each representation type. Our findings suggest that there is a significant effect of (1) the representation type, (2) the strategies performed by users, and (3) the combination of both on recognition time and accuracy. We discuss the suitability of each representation for different settings and provide specific recommendations as to how user representations should be applied in multi-user scenarios. These recommendations guide practitioners and researchers in selecting the representation that optimizes the most for the deployment's requirements, and for the user strategies that are feasible in that environment.},
acmid = {3173861},
articleno = {287},
doi = {10.1145/3173574.3173861},
isbn = {978-1-4503-5620-6},
keywords = {multiple users, public displays, user representations},
numpages = {12},
timestamp = {2018.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2018chi1.pdf},
}
D. Weber, A. Voit, G. Kollotzek, L. van der Vekens, M. Hepting, F. Alt, and N. Henze. Pd notify: investigating personal content on public displays. In Extended abstracts of the 2018 chi conference on human factors in computing systems (CHI EA ’18), ACM, New York, NY, USA, 2018, p. LBW014:1–LBW014:6. doi:10.1145/3170427.3188475
[BibTeX] [Abstract] [PDF]
Public displays are becoming more and more ubiquitous. Current public displays are mainly used as general information displays or to display advertisements. How personal content should be shown is still an important research topic. In this paper, we present PD Notify, a system that mirrors a user’s pending smartphone notifications on nearby public displays. Notifications are an essential part of current smartphones and inform users about various events, such as new messages, pending updates, personalized news, and upcoming appointments. PD Notify implements privacy settings to control what is shown on the public displays. We conducted an in-situ study in a semi-public work environment for three weeks with seven participants. The results of this first deployment show that displaying personal content on public displays is not only feasible but also valued by users. Participants quickly settled for privacy settings that work for all kinds of content. While they liked the system, they did not want to spend time configuring it.
@InProceedings{weber2018chiea,
author = {Weber, Dominik and Voit, Alexandra and Kollotzek, Gisela and van der Vekens, Lucas and Hepting, Marcus and Alt, Florian and Henze, Niels},
title = {{PD Notify}: Investigating Personal Content on Public Displays},
booktitle = {Extended Abstracts of the 2018 {CHI} Conference on Human Factors in Computing Systems},
year = {2018},
series = {CHI EA '18},
pages = {LBW014:1--LBW014:6},
address = {New York, NY, USA},
publisher = {ACM},
note = {weber2018chiea},
abstract = {Public displays are becoming more and more ubiquitous. Current public displays are mainly used as general information displays or to display advertisements. How personal content should be shown is still an important research topic. In this paper, we present PD Notify, a system that mirrors a user's pending smartphone notifications on nearby public displays. Notifications are an essential part of current smartphones and inform users about various events, such as new messages, pending updates, personalized news, and upcoming appointments. PD Notify implements privacy settings to control what is shown on the public displays. We conducted an in-situ study in a semi-public work environment for three weeks with seven participants. The results of this first deployment show that displaying personal content on public displays is not only feasible but also valued by users. Participants quickly settled for privacy settings that work for all kinds of content. While they liked the system, they did not want to spend time configuring it.},
acmid = {3188475},
articleno = {LBW014},
doi = {10.1145/3170427.3188475},
isbn = {978-1-4503-5621-3},
keywords = {notifications, pervasive, privacy, public displays},
numpages = {6},
timestamp = {2018.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/weber2018chiea.pdf},
}
M. Khamis, D. Buschek, T. Thieron, F. Alt, and A. Bulling. Eyepact: eye-based parallax correction on touch-enabled interactive displays. Proc. acm interact. mob. wearable ubiquitous technol., vol. 1, iss. 4, p. 146:1–146:18, 2018. doi:10.1145/3161168
[BibTeX] [Abstract] [PDF]
The parallax effect describes the displacement between the perceived and detected touch locations on a touch-enabled surface. Parallax is a key usability challenge for interactive displays, particularly for those that require thick layers of glass between the screen and the touch surface to protect them from vandalism. To address this challenge, we present EyePACT, a method that compensates for input error caused by parallax on public displays. Our method uses a display-mounted depth camera to detect the user’s 3D eye position in front of the display and the detected touch location to predict the perceived touch location on the surface. We evaluate our method in two user studies in terms of parallax correction performance as well as multi-user support. Our evaluations demonstrate that EyePACT (1) significantly improves accuracy even with varying gap distances between the touch surface and the display, (2) adapts to different levels of parallax by resulting in significantly larger corrections with larger gap distances, and (3) maintains a significantly large distance between two users’ fingers when interacting with the same object. These findings are promising for the development of future parallax-free interactive displays.
@Article{khamis2018imwut,
author = {Khamis, Mohamed and Buschek, Daniel and Thieron, Tobias and Alt, Florian and Bulling, Andreas},
title = {{EyePACT}: Eye-Based Parallax Correction on Touch-Enabled Interactive Displays},
journal = {Proc. {ACM} Interact. Mob. Wearable Ubiquitous Technol.},
year = {2018},
volume = {1},
number = {4},
pages = {146:1--146:18},
month = jan,
issn = {2474-9567},
note = {khamis2018imwut},
abstract = {The parallax effect describes the displacement between the perceived and detected touch locations on a touch-enabled surface. Parallax is a key usability challenge for interactive displays, particularly for those that require thick layers of glass between the screen and the touch surface to protect them from vandalism. To address this challenge, we present EyePACT, a method that compensates for input error caused by parallax on public displays. Our method uses a display-mounted depth camera to detect the user's 3D eye position in front of the display and the detected touch location to predict the perceived touch location on the surface. We evaluate our method in two user studies in terms of parallax correction performance as well as multi-user support. Our evaluations demonstrate that EyePACT (1) significantly improves accuracy even with varying gap distances between the touch surface and the display, (2) adapts to different levels of parallax by resulting in significantly larger corrections with larger gap distances, and (3) maintains a significantly large distance between two users' fingers when interacting with the same object. These findings are promising for the development of future parallax-free interactive displays.},
acmid = {3161168},
address = {New York, NY, USA},
articleno = {146},
doi = {10.1145/3161168},
issue_date = {December 2017},
keywords = {Gaze, Parallax, Public Displays, Touch screens},
numpages = {18},
publisher = {ACM},
timestamp = {2018.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2018imwut.pdf},
}
T. Kosch, M. Hassib, P. W. Woźniak, D. Buschek, and F. Alt. Your Eyes Tell: Leveraging Smooth Pursuit for Assessing Cognitive Workload. In Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems (CHI ’18), ACM, New York, NY, USA, 2018, p. 436:1–436:13. doi:10.1145/3173574.3174010
[BibTeX] [Abstract] [PDF]
A common objective for context-aware computing systems is to predict how user interfaces impact user performance regarding their cognitive capabilities. Existing approaches such as questionnaires or pupil dilation measurements either only allow for subjective assessments or are susceptible to environmental influences and user physiology. We address these challenges by exploiting the fact that cognitive workload influences smooth pursuit eye movements. We compared three trajectories and two speeds under different levels of cognitive workload within a user study (N=20). We found higher deviations of gaze points during smooth pursuit eye movements for specific trajectory types at higher cognitive workload levels. Using an SVM classifier, we predict cognitive workload through smooth pursuit with an accuracy of 99.5% for distinguishing between low and high workload as well as an accuracy of 88.1% for estimating workload between three levels of difficulty. We discuss implications and present use cases of how cognition-aware systems benefit from inferring cognitive workload in real-time by smooth pursuit eye movements.
@InProceedings{kosch2018chi,
author = {Kosch, Thomas and Hassib, Mariam and Wo\'{z}niak, Pawe\l W. and Buschek, Daniel and Alt, Florian},
booktitle = {Proceedings of the 2018 {CHI} Conference on Human Factors in Computing Systems},
title = {Your Eyes Tell: Leveraging Smooth Pursuit for Assessing Cognitive Workload},
year = {2018},
address = {New York, NY, USA},
note = {kosch2018chi},
pages = {436:1--436:13},
publisher = {ACM},
series = {CHI '18},
abstract = {A common objective for context-aware computing systems is to predict how user interfaces impact user performance regarding their cognitive capabilities. Existing approaches such as questionnaires or pupil dilation measurements either only allow for subjective assessments or are susceptible to environmental influences and user physiology. We address these challenges by exploiting the fact that cognitive workload influences smooth pursuit eye movements. We compared three trajectories and two speeds under different levels of cognitive workload within a user study (N=20). We found higher deviations of gaze points during smooth pursuit eye movements for specific trajectory types at higher cognitive workload levels. Using an SVM classifier, we predict cognitive workload through smooth pursuit with an accuracy of 99.5% for distinguishing between low and high workload as well as an accuracy of 88.1% for estimating workload between three levels of difficulty. We discuss implications and present use cases of how cognition-aware systems benefit from inferring cognitive workload in real-time by smooth pursuit eye movements.},
acmid = {3174010},
articleno = {436},
doi = {10.1145/3173574.3174010},
isbn = {978-1-4503-5620-6},
numpages = {13},
timestamp = {2018.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/kosch2018chi.pdf},
}
M. Khamis, A. Baier, N. Henze, F. Alt, and A. Bulling. Understanding Face and Eye Visibility in Front-Facing Cameras of Smartphones Used in the Wild. In Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems (CHI ’18), ACM, New York, NY, USA, 2018, p. 280:1–280:12. doi:10.1145/3173574.3173854
[BibTeX] [Abstract] [PDF]
Commodity mobile devices are now equipped with high-resolution front-facing cameras, allowing applications in biometrics (e.g., FaceID in the iPhone X), facial expression analysis, or gaze interaction. However, it is unknown how often users hold devices in a way that allows capturing their face or eyes, and how this impacts detection accuracy. We collected 25,726 in-the-wild photos, taken from the front-facing camera of smartphones as well as associated application usage logs. We found that the full face is visible about 29% of the time, and that in most cases the face is only partially visible. Furthermore, we identified an influence of users’ current activity; for example, when watching videos, the eyes but not the entire face are visible 75% of the time in our dataset. We found that a state-of-the-art face detection algorithm performs poorly against photos taken from front-facing cameras. We discuss how these findings impact mobile applications that leverage face and eye detection, and derive practical implications to address state-of-the art’s limitations.
@InProceedings{khamis2018chi2,
author = {Khamis, Mohamed and Baier, Anita and Henze, Niels and Alt, Florian and Bulling, Andreas},
booktitle = {Proceedings of the 2018 {CHI} Conference on Human Factors in Computing Systems},
title = {Understanding Face and Eye Visibility in Front-Facing Cameras of Smartphones Used in the Wild},
year = {2018},
address = {New York, NY, USA},
note = {khamis2018chi2},
pages = {280:1--280:12},
publisher = {ACM},
series = {CHI '18},
abstract = {Commodity mobile devices are now equipped with high-resolution front-facing cameras, allowing applications in biometrics (e.g., FaceID in the iPhone X), facial expression analysis, or gaze interaction. However, it is unknown how often users hold devices in a way that allows capturing their face or eyes, and how this impacts detection accuracy. We collected 25,726 in-the-wild photos, taken from the front-facing camera of smartphones as well as associated application usage logs. We found that the full face is visible about 29% of the time, and that in most cases the face is only partially visible. Furthermore, we identified an influence of users' current activity; for example, when watching videos, the eyes but not the entire face are visible 75% of the time in our dataset. We found that a state-of-the-art face detection algorithm performs poorly against photos taken from front-facing cameras. We discuss how these findings impact mobile applications that leverage face and eye detection, and derive practical implications to address state-of-the art's limitations.},
acmid = {3173854},
articleno = {280},
doi = {10.1145/3173574.3173854},
isbn = {978-1-4503-5620-6},
keywords = {eye tracking, face detection, front-facing camera, gaze estimation, in the wild study, mobile device},
numpages = {12},
timestamp = {2018.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2018chi2.pdf},
}
L. Mecke, S. Prange, D. Buschek, and F. Alt. A design space for security indicators for behavioural biometrics on mobile touchscreen devices. In Extended abstracts of the 2018 chi conference on human factors in computing systems (CHI EA ’18), ACM, New York, NY, USA, 2018, p. LBW003:1–LBW003:6. doi:10.1145/3170427.3188633
[BibTeX] [Abstract] [PDF]
We propose a design space for security indicators for behavioural biometrics on mobile touchscreen devices. Design dimensions are derived from a focus group with experts and a literature review. The space supports the design of indicators which aim to facilitate users’ decision making, awareness and understanding, as well as increase transparency of behavioural biometrics systems. We conclude with a set of example designs and discuss further extensions, future research questions and study ideas.
@InProceedings{mecke2018chiea,
author = {Mecke, Lukas and Prange, Sarah and Buschek, Daniel and Alt, Florian},
title = {A Design Space for Security Indicators for Behavioural Biometrics on Mobile Touchscreen Devices},
booktitle = {Extended Abstracts of the 2018 {CHI} Conference on Human Factors in Computing Systems},
year = {2018},
series = {CHI EA '18},
pages = {LBW003:1--LBW003:6},
address = {New York, NY, USA},
publisher = {ACM},
note = {mecke2018chiea},
abstract = {We propose a design space for security indicators for behavioural biometrics on mobile touchscreen devices. Design dimensions are derived from a focus group with experts and a literature review. The space supports the design of indicators which aim to facilitate users' decision making, awareness and understanding, as well as increase transparency of behavioural biometrics systems. We conclude with a set of example designs and discuss further extensions, future research questions and study ideas.},
acmid = {3188633},
articleno = {LBW003},
doi = {10.1145/3170427.3188633},
isbn = {978-1-4503-5621-3},
keywords = {behavioural biometrics, design space, focus group, mobile touchscreen devices, security indicator},
numpages = {6},
timestamp = {2018.04.30},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mecke2018chiea.pdf},
}
M. Hassib, S. Schneegass, N. Henze, A. Schmidt, and F. Alt. A design space for audience sensing and feedback systems. In Extended abstracts of the 2018 chi conference on human factors in computing systems (CHI EA ’18), ACM, New York, NY, USA, 2018, p. LBW085:1–LBW085:6. doi:10.1145/3170427.3188569
[BibTeX] [Abstract] [PDF]
Audience feedback is a valuable asset in many domains such as arts, education, and marketing. Artists can receive feedback on the experiences created through their performances. Similarly, teachers can receive feedback from students on the understandability of their course content. There are various methods to collect explicit feedback (e.g., questionnaires) – yet they usually impose a burden to the audience. Advances in physiological sensing opens up opportunities for collecting feedback implicitly. This creates unexplored dimensions in the design space of audience sensing. In this work, we chart a comprehensive design space for audience sensing based on a literature and market review which aims to support the designers’ process for creating novel feedback systems.
@InProceedings{hassib2018chiea,
author = {Hassib, Mariam and Schneegass, Stefan and Henze, Niels and Schmidt, Albrecht and Alt, Florian},
title = {A Design Space for Audience Sensing and Feedback Systems},
booktitle = {Extended Abstracts of the 2018 {CHI} Conference on Human Factors in Computing Systems},
year = {2018},
series = {CHI EA '18},
pages = {LBW085:1--LBW085:6},
address = {New York, NY, USA},
publisher = {ACM},
note = {hassib2018chiea},
abstract = {Audience feedback is a valuable asset in many domains such as arts, education, and marketing. Artists can receive feedback on the experiences created through their performances. Similarly, teachers can receive feedback from students on the understandability of their course content. There are various methods to collect explicit feedback (e.g., questionnaires) - yet they usually impose a burden to the audience. Advances in physiological sensing opens up opportunities for collecting feedback implicitly. This creates unexplored dimensions in the design space of audience sensing. In this work, we chart a comprehensive design space for audience sensing based on a literature and market review which aims to support the designers' process for creating novel feedback systems.},
acmid = {3188569},
articleno = {LBW085},
doi = {10.1145/3170427.3188569},
isbn = {978-1-4503-5621-3},
keywords = {affective computing, audience sensing},
numpages = {6},
timestamp = {2018.04.30},
url = {http://www.florian-alt.org/unibw/wp-content/publications/hassib2018chiea.pdf},
}
L. Trotter, S. Prange, M. Khamis, N. Davies, and F. Alt. Design considerations for secure and usable authentication on situated displays. In Proceedings of the 17th international conference on mobile and ubiquitous multimedia (MUM 2018), ACM, New York, NY, USA, 2018, p. 483–490. doi:10.1145/3282894.3289743
[BibTeX] [Abstract] [PDF]
Users often need to authenticate at situated displays in order to, for example, make purchases, access sensitive information, or confirm an identity. However, the exposure of interactions in public spaces introduces a large attack surface (e.g., observation, smudge or thermal attacks). A plethora of authentication models and input modalities that aim at disguising users’ input has been presented in the past. However, a comprehensive analysis on the requirements for secure and usable authentication on public displays is still missing. This work presents 13 design considerations suitable to inform practitioners and researchers during the development process of authentication systems for situated displays in public spaces. It draws on a comprehensive analysis of prior literature and subsequent discussion with five experts in the fields of pervasive displays, human-computer-interaction and usable security.
@InProceedings{trotter2018mumadj,
author = {Trotter, Ludwig and Prange, Sarah and Khamis, Mohamed and Davies, Nigel and Alt, Florian},
title = {Design Considerations for Secure and Usable Authentication on Situated Displays},
booktitle = {Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia},
year = {2018},
series = {MUM 2018},
pages = {483--490},
address = {New York, NY, USA},
publisher = {ACM},
note = {trotter2018mumadj},
abstract = {Users often need to authenticate at situated displays in order to, for example, make purchases, access sensitive information, or confirm an identity. However, the exposure of interactions in public spaces introduces a large attack surface (e.g., observation, smudge or thermal attacks). A plethora of authentication models and input modalities that aim at disguising users' input has been presented in the past. However, a comprehensive analysis on the requirements for secure and usable authentication on public displays is still missing. This work presents 13 design considerations suitable to inform practitioners and researchers during the development process of authentication systems for situated displays in public spaces. It draws on a comprehensive analysis of prior literature and subsequent discussion with five experts in the fields of pervasive displays, human-computer-interaction and usable security.},
acmid = {3289743},
doi = {10.1145/3282894.3289743},
isbn = {978-1-4503-6594-9},
keywords = {Authentication, Design Considerations, Input Modalities, Public Displays, User Interface Design},
location = {Cairo, Egypt},
numpages = {8},
timestamp = {2018.11.28},
}

### 2017

S. Prange, V. Müller, D. Buschek, and F. Alt. Quakequiz – a case study on deploying a playful display application in a museum context. In Proceedings of the 16th international conference on mobile and ubiquitous multimedia (MUM ’17), ACM, New York, NY, USA, 2017. doi:10.1145/3152832.3152841
[BibTeX] [Abstract] [PDF]
In this paper, we present a case study in which we designed and implemented an interactive museum exhibit. In particular, we extended a section of the museum with an interactive quiz game. The project is an example of an opportunistic deployment where the needs of different stakeholders (museum administration, visitors, researchers) and the properties of the space needed to be considered. It is also an example of how we can apply knowledge on methodology and audience behavior collected over the past years by the research community. At the focus of this paper is (1) the design and concept phase that led to the initial idea for the exhibit, (2) the implementation phase, (3) a roll-out and early insights phase where we tested and refined the application in an iterative design process on-site, and (4) the final deployment as a permanent exhibit of the museum. We hope our report to be useful for researchers and practitioners designing systems for similar contexts.
@InProceedings{prange2017mum,
author = {Sarah Prange and Victoria M\"uller and Daniel Buschek and Florian Alt},
title = {{QuakeQuiz} -- A Case Study on Deploying a Playful Display Application in a Museum Context},
booktitle = {Proceedings of the 16th International Conference on Mobile and Ubiquitous Multimedia},
year = {2017},
series = {MUM '17},
address = {New York, NY, USA},
publisher = {ACM},
note = {prange2017mum},
abstract = {In this paper, we present a case study in which we designed and implemented an interactive museum exhibit. In particular, we extended a section of the museum with an interactive quiz game. The project is an example of an opportunistic deployment where the needs of different stakeholders (museum administration, visitors, researchers) and the properties of the space needed to be considered. It is also an example of how we can apply knowledge on methodology and audience behavior collected over the past years by the research community. At the focus of this paper is (1) the design and concept phase that led to the initial idea for the exhibit, (2) the implementation phase, (3) a roll-out and early insights phase where we tested and refined the application in an iterative design process on-site, and (4) the final deployment as a permanent exhibit of the museum. We hope our report to be useful for researchers and practitioners designing systems for similar contexts.},
doi = {10.1145/3152832.3152841},
location = {Stuttgart, Germany},
timestamp = {2017.11.26},
url = {http://www.florian-alt.org/unibw/wp-content/publications/prange2017mum.pdf},
}
M. Hassib, M. Khamis, S. Friedl, S. Schneegass, and F. Alt. BrainAtWork: Logging Cognitive Engagement and Tasks in the Workplace Using Electroencephalography. In Proceedings of the 16th international conference on mobile and ubiquitous multimedia (MUM ’17), ACM, New York, NY, USA, 2017, p. 305–310. doi:10.1145/3152832.3152865
[BibTeX] [Abstract] [PDF]
Today’s workplaces are dynamic and complex. Digital data sources such as email and video conferencing aim to support workers but also add to their burden of multitasking. Psychophysiological sensors such as Electroencephalography (EEG) can provide users with cues about their cognitive state. We introduce BrainAtWork, a workplace engagement and task logger which shows users their cognitive state while working on different tasks. In a lab study with eleven participants working on their own real-world tasks, we gathered 16 hours of EEG and PC logs which were labeled into three classes: central, peripheral and meta work. We evaluated the usability of BrainAtWork via questionnaires and interviews. We investigated the correlations between measured cognitive engagement from EEG and subjective responses from experience sampling probes. Using random forests classification, we show the feasibility of automatically labeling work tasks into work classes. We discuss how BrainAtWork can support workers on the long term through encouraging reflection and helping in task scheduling.
@inproceedings{hassib2017mum,
  author    = {Hassib, Mariam and Khamis, Mohamed and Friedl, Susanne and Schneegass, Stefan and Alt, Florian},
  title     = {{BrainAtWork: Logging Cognitive Engagement and Tasks in the Workplace Using Electroencephalography}},
  booktitle = {Proceedings of the 16th International Conference on Mobile and Ubiquitous Multimedia},
  series    = {MUM '17},
  year      = {2017},
  pages     = {305--310},
  numpages  = {6},
  publisher = {ACM},
  address   = {New York, NY, USA},
  location  = {Stuttgart, Germany},
  isbn      = {978-1-4503-5378-6},
  doi       = {10.1145/3152832.3152865},
  acmid     = {3152865},
  keywords  = {EEG, multitasking, workplace logging},
  abstract  = {Today's workplaces are dynamic and complex. Digital data sources such as email and video conferencing aim to support workers but also add to their burden of multitasking. Psychophysiological sensors such as Electroencephalography (EEG) can provide users with cues about their cognitive state. We introduce BrainAtWork, a workplace engagement and task logger which shows users their cognitive state while working on different tasks. In a lab study with eleven participants working on their own real-world tasks, we gathered 16 hours of EEG and PC logs which were labeled into three classes: central, peripheral and meta work. We evaluated the usability of BrainAtWork via questionnaires and interviews. We investigated the correlations between measured cognitive engagement from EEG and subjective responses from experience sampling probes. Using random forests classification, we show the feasibility of automatically labeling work tasks into work classes. We discuss how BrainAtWork can support workers on the long term through encouraging reflection and helping in task scheduling.},
  note      = {hassib2017mum},
  timestamp = {2017.11.26},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/hassib2017mum.pdf},
}
M. Khamis, L. Bandelow, S. Schick, D. Casadevall, A. Bulling, and F. Alt. They are all after you: investigating the viability of a threat model that involves multiple shoulder surfers. In Proceedings of the 16th international conference on mobile and ubiquitous multimedia (MUM ’17), ACM, New York, NY, USA, 2017, p. 31–35. doi:10.1145/3152832.3152851
[BibTeX] [Abstract] [PDF]
Many of the authentication schemes for mobile devices that were proposed lately complicate shoulder surfing by splitting the attacker’s attention into two or more entities. For example, multimodal authentication schemes such as GazeTouchPIN and GazeTouchPass require attackers to observe the user’s gaze input and the touch input performed on the phone’s screen. These schemes have always been evaluated against single observers, while multiple observers could potentially attack these schemes with greater ease, since each of them can focus exclusively on one part of the password. In this work, we study the effectiveness of a novel threat model against authentication schemes that split the attacker’s attention. As a case study, we report on a security evaluation of two state of the art authentication schemes in the case of a team of two observers. Our results show that although multiple observers perform better against these schemes than single observers, multimodal schemes are significantly more secure against multiple observers compared to schemes that employ a single modality. We discuss how this threat model impacts the design of authentication schemes.
@inproceedings{khamis2017mum,
  author    = {Khamis, Mohamed and Bandelow, Linda and Schick, Stina and Casadevall, Dario and Bulling, Andreas and Alt, Florian},
  title     = {They Are All After You: Investigating the Viability of a Threat Model That Involves Multiple Shoulder Surfers},
  booktitle = {Proceedings of the 16th International Conference on Mobile and Ubiquitous Multimedia},
  series    = {MUM '17},
  year      = {2017},
  pages     = {31--35},
  numpages  = {5},
  publisher = {ACM},
  address   = {New York, NY, USA},
  location  = {Stuttgart, Germany},
  isbn      = {978-1-4503-5378-6},
  doi       = {10.1145/3152832.3152851},
  acmid     = {3152851},
  keywords  = {gaze gestures, multimodal authentication, multiple observers, privacy, shoulder surfing, threat model},
  abstract  = {Many of the authentication schemes for mobile devices that were proposed lately complicate shoulder surfing by splitting the attacker's attention into two or more entities. For example, multimodal authentication schemes such as GazeTouchPIN and GazeTouchPass require attackers to observe the user's gaze input and the touch input performed on the phone's screen. These schemes have always been evaluated against single observers, while multiple observers could potentially attack these schemes with greater ease, since each of them can focus exclusively on one part of the password. In this work, we study the effectiveness of a novel threat model against authentication schemes that split the attacker's attention. As a case study, we report on a security evaluation of two state of the art authentication schemes in the case of a team of two observers. Our results show that although multiple observers perform better against these schemes than single observers, multimodal schemes are significantly more secure against multiple observers compared to schemes that employ a single modality. We discuss how this threat model impacts the design of authentication schemes.},
  note      = {khamis2017mum},
  timestamp = {2017.11.26},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2017mum.pdf},
}
F. Alt and L. Ziegler. PD-Survey – Supporting Audience-Centric Research through Surveys on Public Display Networks. In Proceedings of the 25th international acm conference on multimedia (MM’17), ACM, New York, NY, USA, 2017.
[BibTeX] [Abstract] [PDF]
We present PD-Survey, a platform to conduct surveys across a network of interactive screens. Our research is motivated by the fact that obtaining and analyzing data about users of public displays requires significant effort; e.g., running long-term observations or post-hoc analyses of video/interaction logs. As a result, research is often constrained to a single installation within a particular context, neither accounting for a diverse audience (children, shoppers, commuters) nor for different situations (waiting vs. passing by) or times of the day. As displays become networked, one way to address this challenge is through surveys on displays, where audience feedback is collected in situ. Since current tools do not appropriately address the requirements of a display network, we implemented a tool for use on public displays and report on its design and development. Our research is complemented by two in-the-wild deployments that (a) investigate different channels for feedback collection, (b) showcase how the work of researchers is supported, and (c) testify that the platform can easily be extended with novel features.
@InProceedings{alt2017mm,
author = {Alt, Florian and Ziegler, Lukas},
title = {{PD-Survey - Supporting Audience-Centric Research through Surveys on Public Display Networks}},
booktitle = {Proceedings of the 25th International ACM Conference on Multimedia},
year = {2017},
series = {MM'17},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2017mm},
abstract = {We present PD-Survey, a platform to conduct surveys across a network of interactive screens. Our research is motivated by the fact that obtaining and analyzing data about users of public displays requires significant effort; e.g., running long-term observations or post-hoc analyses of video/interaction logs. As a result, research is often constrained to a single installation within a particular context, neither accounting for a diverse audience (children, shoppers, commuters) nor for different situations (waiting vs. passing by) or times of the day. As displays become networked, one way to address this challenge is through surveys on displays, where audience feedback is collected in situ. Since current tools do not appropriately address the requirements of a display network, we implemented a tool for use on public displays and report on its design and development. Our research is complemented by two in-the-wild deployments that (a) investigate different channels for feedback collection, (b) showcase how the work of researchers is supported, and (c) testify that the platform can easily be extended with novel features.},
keywords = {public displays, surveys},
location = {Mountain View, CA, USA},
numpages = {9},
timestamp = {2017.10.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2017mm.pdf},
}
M. Khamis, M. Hassib, E. von Zezschwitz, A. Bulling, and F. Alt. Gazetouchpin: protecting sensitive data on mobile devices using secure multimodal authentication. In Proceedings of the 19th acm international conference on multimodal interaction (ICMI 2017), ACM, New York, NY, USA, 2017. doi:10.1145/3136755.3136809
[BibTeX] [Abstract] [PDF]
Although mobile devices provide access to a plethora of sensitive data, most users still only protect them with PINs or patterns, which are vulnerable to side-channel attacks (e.g., shoulder surfing). However, prior research has shown that privacy-aware users are willing to take further steps to protect their private data. We propose GazeTouchPIN, a novel secure authentication scheme for mobile devices that combines gaze and touch input. Our multimodal approach complicates shoulder-surfing attacks by requiring attackers to observe the screen as well as the user’s eyes to find the password. We evaluate the security and usability of GazeTouchPIN in two user studies (N=30). We found that while GazeTouchPIN requires longer entry times, privacy aware users would use it on-demand when feeling observed or when accessing sensitive data. The results show that successful shoulder surfing attack rate drops from 68% to 10.4% when using GazeTouchPIN.
@InProceedings{khamis2017icmi,
author = {Khamis, Mohamed and Hassib, Mariam and von Zezschwitz, Emanuel and Bulling, Andreas and Alt, Florian},
title = {GazeTouchPIN: Protecting Sensitive Data on Mobile Devices using Secure Multimodal Authentication},
booktitle = {Proceedings of the 19th ACM International Conference on Multimodal Interaction},
year = {2017},
series = {ICMI 2017},
address = {New York, NY, USA},
publisher = {ACM},
note = {khamis2017icmi},
abstract = {Although mobile devices provide access to a plethora of sensitive data, most users still only protect them with PINs or patterns, which are vulnerable to side-channel attacks (e.g., shoulder surfing). However, prior research has shown that privacy-aware users are willing to take further steps to protect their private data. We propose GazeTouchPIN, a novel secure authentication scheme for mobile devices that combines gaze and touch input. Our multimodal approach complicates shoulder-surfing attacks by requiring attackers to observe the screen as well as the user’s eyes to find the password. We evaluate the security and usability of GazeTouchPIN in two user studies (N=30). We found that while GazeTouchPIN requires longer entry times, privacy aware users would use it on-demand when feeling observed or when accessing sensitive data. The results show that successful shoulder surfing attack rate drops from 68% to 10.4% when using GazeTouchPIN.},
acmid = {3136809},
doi = {10.1145/3136755.3136809},
isbn = {978-1-4503-5543-8},
location = {Glasgow, Scotland},
numpages = {5},
timestamp = {2017.10.24},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2017icmi.pdf},
}
D. Buschek, J. Kinshofer, and F. Alt. A Comparative Evaluation of Spatial Targeting Behaviour Patterns for Finger and Stylus Tapping on Mobile Touchscreen Devices. Proc. ACM Interact. Mob. Wearable Ubiquitous Technol., vol. 1, iss. 4, ACM, New York, NY, USA, 2017, p. 126:1–126:21. doi:10.1145/3161160
[BibTeX] [Abstract] [PDF]
Models of 2D targeting error patterns have been applied as a valuable computational tool for analysing finger touch behaviour on mobile devices, improving touch accuracy and inferring context. However, their use in stylus input is yet unexplored. This paper presents the first empirical study and analyses of such models for tapping with a stylus. In a user study (N = 28), we collected targeting data on a smartphone, both for stationary use (sitting) and walking. We compare targeting patterns between index finger input and three stylus variations – two stylus widths and nib types as well as the addition of a hover cursor. Our analyses reveal that stylus targeting patterns are user-specific, and that offset models improve stylus tapping accuracy, but less so than for finger touch. Input method has a stronger influence on targeting patterns than mobility, and stylus width is more influential than the hover cursor. Stylus models improve finger accuracy as well, but not vice versa. The extent of the stylus accuracy advantage compared to the finger depends on screen location and mobility. We also discuss patterns related to mobility and gliding of the stylus on the screen. We conclude with implications for target sizes and offset model applications.
@Article{buschek2017imwut,
author = {Buschek, Daniel and Kinshofer, Julia and Alt, Florian},
title = {{A Comparative Evaluation of Spatial Targeting Behaviour Patterns for Finger and Stylus Tapping on Mobile Touchscreen Devices}},
journal = {Proc. ACM Interact. Mob. Wearable Ubiquitous Technol.},
year = {2017},
volume = {1},
number = {4},
pages = {126:1--126:21},
address = {New York, NY, USA},
month = jan,
publisher = {ACM},
note = {buschek2017imwut},
abstract = {Models of 2D targeting error patterns have been applied as a valuable computational tool for analysing finger touch behaviour on mobile devices, improving touch accuracy and inferring context. However, their use in stylus input is yet unexplored. This paper presents the first empirical study and analyses of such models for tapping with a stylus. In a user study (N = 28), we collected targeting data on a smartphone, both for stationary use (sitting) and walking. We compare targeting patterns between index finger input and three stylus variations -- two stylus widths and nib types as well as the addition of a hover cursor. Our analyses reveal that stylus targeting patterns are user-specific, and that offset models improve stylus tapping accuracy, but less so than for finger touch. Input method has a stronger influence on targeting patterns than mobility, and stylus width is more influential than the hover cursor. Stylus models improve finger accuracy as well, but not vice versa. The extent of the stylus accuracy advantage compared to the finger depends on screen location and mobility. We also discuss patterns related to mobility and gliding of the stylus on the screen. We conclude with implications for target sizes and offset model applications.},
acmid = {3161160},
articleno = {126},
doi = {10.1145/3161160},
issn = {2474-9567},
issue_date = {December 2017},
keywords = {Gaussian Process regression, Stylus input, computational interaction, offset model},
numpages = {21},
timestamp = {2017.10.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2017imwut.pdf},
}
E. Lösch, F. Alt, and M. Koch. Mirror, mirror on the wall: attracting passers-by to public touch displays with user representations. In Proceedings of the 2017 acm international conference on interactive surfaces and spaces (ISS ’17), ACM, New York, NY, USA, 2017, p. 22–31. doi:10.1145/3132272.3134129
[BibTeX] [Abstract] [PDF]
In this paper, we investigate how effectively users’ representations convey interactivity and foster interaction on large information touch displays. This research is motivated by the fact that user representations have been shown to be very efficient in playful applications that support mid-air interaction. At the same time, little is known about the effects of applying this approach to settings with a different primary mode of interaction, e.g. touch. It is also unclear how the playfulness of user representations influences the interest of users in the displayed information. To close this gap, we combine a touch display with screens showing life-sized video representations of passers-by. In a deployment, we compare different spatial arrangements to understand how passers-by are attracted and enticed to interact, how they explore the application, and how they socially behave. Findings reveal that (a) opposing displays foster interaction, but (b) may also reduce interaction at the main display; (c) a large intersection between focus and nimbus helps to notice interactivity; (d) using playful elements at information displays is not counterproductive; (e) mixed interaction modalities are hard to understand.
@inproceedings{loesch2017iss,
  author    = {L\"{o}sch, Eva and Alt, Florian and Koch, Michael},
  title     = {Mirror, Mirror on the Wall: Attracting Passers-by to Public Touch Displays With User Representations},
  booktitle = {Proceedings of the 2017 ACM International Conference on Interactive Surfaces and Spaces},
  series    = {ISS '17},
  year      = {2017},
  pages     = {22--31},
  numpages  = {10},
  publisher = {ACM},
  address   = {New York, NY, USA},
  location  = {Brighton, United Kingdom},
  isbn      = {978-1-4503-4691-7},
  doi       = {10.1145/3132272.3134129},
  acmid     = {3134129},
  keywords  = {Public Displays, Touch Interaction, User Representations},
  abstract  = {In this paper, we investigate how effectively users' representations convey interactivity and foster interaction on large information touch displays. This research is motivated by the fact that user representations have been shown to be very efficient in playful applications that support mid-air interaction. At the same time, little is known about the effects of applying this approach to settings with a different primary mode of interaction, e.g. touch. It is also unclear how the playfulness of user representations influences the interest of users in the displayed information. To close this gap, we combine a touch display with screens showing life-sized video representations of passers-by. In a deployment, we compare different spatial arrangements to understand how passers-by are attracted and enticed to interact, how they explore the application, and how they socially behave. Findings reveal that (a) opposing displays foster interaction, but (b) may also reduce interaction at the main display; (c) a large intersection between focus and nimbus helps to notice interactivity; (d) using playful elements at information displays is not counterproductive; (e) mixed interaction modalities are hard to understand.},
  note      = {loesch2017iss},
  timestamp = {2017.10.15},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/loesch2017iss.pdf},
}
M. Koch and F. Alt. Allgegenwärtige mensch-computer-interaktion. Informatik-spektrum, p. 1–6, 2017. doi:10.1007/s00287-017-1027-4
[BibTeX] [Abstract] [PDF]
Computer durchdringen unseren Alltag. Dabei sind diese derart in unsere Umgebung eingebettet, dass diese von uns nicht mehr als solche wahrgenommen werden. Hierdurch entsteht die Notwendigkeit zur Schaffung unmittelbar verständlicher Benutzerschnittstellen – sowohl für Individuen als auch für Gruppen von Benutzern. Mit diesem Teilbereich der Informatik beschäftigt sich die Mensch-Maschine-Interaktion. Dieser Artikel gibt einen Einblick in die Forschungsaktivitäten zu diesem Thema an den Münchner Universitäten. Im Fokus stehen hierbei Arbeiten zu öffentlichen Bildschirmen, Blickinteraktion im öffentlichen Raum sowie die Entwicklung sicherer und gleichzeitig benutzbarer Authentifizierungsverfahren.
@article{koch2017informatikspektrum,
  author    = {Koch, Michael and Alt, Florian},
  title     = {Allgegenw{\"a}rtige Mensch-Computer-Interaktion},
  journal   = {Informatik-Spektrum},
  year      = {2017},
  pages     = {1--6},
  issn      = {1432-122X},
  doi       = {10.1007/s00287-017-1027-4},
  abstract  = {Computer durchdringen unseren Alltag. Dabei sind diese derart in unsere Umgebung eingebettet, dass diese von uns nicht mehr als solche wahrgenommen werden. Hierdurch entsteht die Notwendigkeit zur Schaffung unmittelbar verst{\"a}ndlicher Benutzerschnittstellen -- sowohl f{\"u}r Individuen als auch f{\"u}r Gruppen von Benutzern. Mit diesem Teilbereich der Informatik besch{\"a}ftigt sich die Mensch-Maschine-Interaktion. Dieser Artikel gibt einen Einblick in die Forschungsaktivit{\"a}ten zu diesem Thema an den M{\"u}nchner Universit{\"a}ten. Im Fokus stehen hierbei Arbeiten zu {\"o}ffentlichen Bildschirmen, Blickinteraktion im {\"o}ffentlichen Raum sowie die Entwicklung sicherer und gleichzeitig benutzbarer Authentifizierungsverfahren.},
  note      = {koch2017informatikspektrum},
  timestamp = {2017.10.01},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/koch2017informatikspektrum.pdf},
}
D. Buschek, M. Hackenschmied, and F. Alt. Dynamic ui adaptations for one-handed use of large mobile touchscreen devices. In Proceedings of the ifip conference on human-computer interaction (INTERACT’17), 2017, p. 184–201.
[BibTeX] [Abstract] [PDF]
We present and evaluate dynamic adaptations for mobile touch GUIs. They mitigate reachability problems that users face when operating large smartphones or “phablets” with a single hand. In particular, we enhance common touch GUI elements with three simple animated location and orientation changes (Roll, Bend, Move). Users can trigger them to move GUI elements within comfortable reach. A lab study (N=35) with two devices (4.95 in, 5.9 in) shows that these adaptations improve reachability on the larger device. They also reduce device movements required to reach the targets. Participants perceived adapted UIs as faster, less exhausting and more comfortable to use than the baselines. Feedback and video analyses also indicate that participants retained a safer grip on the device through our adaptations. We conclude with design implications for (adaptive) touch GUIs on large devices.
@InProceedings{buschek2017interact,
author = {Buschek, Daniel and Hackenschmied, Maximilian and Alt, Florian},
title = {Dynamic UI Adaptations for One-Handed Use of Large Mobile Touchscreen Devices},
booktitle = {Proceedings of the IFIP Conference on Human-Computer Interaction},
year = {2017},
series = {INTERACT'17},
pages = {184--201},
organization = {Springer},
note = {buschek2017interact},
abstract = {We present and evaluate dynamic adaptations for mobile touch GUIs. They mitigate reachability problems that users face when operating large smartphones or ``phablets'' with a single hand. In particular, we enhance common touch GUI elements with three simple animated location and orientation changes (Roll, Bend, Move). Users can trigger them to move GUI elements within comfortable reach. A lab study (N=35) with two devices (4.95 in, 5.9 in) shows that these adaptations improve reachability on the larger device. They also reduce device movements required to reach the targets. Participants perceived adapted UIs as faster, less exhausting and more comfortable to use than the baselines. Feedback and video analyses also indicate that participants retained a safer grip on the device through our adaptations. We conclude with design implications for (adaptive) touch GUIs on large devices.},
timestamp = {2017.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2017interact.pdf},
}
M. Koch and F. Alt, “Allgegenwärtige mensch-computer-interaktion,” in 50 jahre universitäts-informatik in münchen, A. Bode, M. Broy, H. Bungartz, and F. Matthes, Eds., Berlin, Heidelberg: Springer Berlin Heidelberg, 2017, p. 11–31. doi:10.1007/978-3-662-54712-0_2
[BibTeX] [Abstract] [PDF]
Computer durchdringen unseren Alltag. Dabei sind diese derart in unsere Umgebung eingebettet, dass diese von uns nicht mehr als solche wahrgenommen werden. Hierdurch entsteht die Notwendigkeit zur Schaffung unmittelbar verständlicher Benutzerschnittstellen – sowohl für Individuen als auch für Gruppen von Benutzern. Mit diesem Teilbereich der Informatik beschäftigt sich die Mensch‐Computer‐Interaktion. Dieser Beitrag bietet zunächst eine kurze Einführung in die Forschungsmethodik der MCI und gibt einen Einblick in die Forschungsaktivitäten zu diesem Thema an den Münchner Universitäten. Im Fokus stehen hierbei Arbeiten zu öffentlichen Bildschirmen, Blickinteraktion im öffentlichen Raum, sowie die Entwicklung sicherer und gleichzeitig benutzbarer Authentifizierungsverfahren.
@InCollection{koch2017informatikmuenchen,
author = {Koch, Michael and Alt, Florian},
title = {Allgegenw{\"a}rtige Mensch-Computer-Interaktion},
booktitle = {50 Jahre Universit{\"a}ts-Informatik in M{\"u}nchen},
editor = {Bode, Arndt and Broy, Manfred and Bungartz, Hans-Joachim and Matthes, Florian},
publisher = {Springer Berlin Heidelberg},
year = {2017},
pages = {11--31},
isbn = {978-3-662-54712-0},
note = {koch2017informatikmuenchen},
abstract = {Computer durchdringen unseren Alltag. Dabei sind diese derart in unsere Umgebung eingebettet, dass diese von uns nicht mehr als solche wahrgenommen werden. Hierdurch entsteht die Notwendigkeit zur Schaffung unmittelbar verst{\"a}ndlicher Benutzerschnittstellen -- sowohl f{\"u}r Individuen als auch f{\"u}r Gruppen von Benutzern. Mit diesem Teilbereich der Informatik besch{\"a}ftigt sich die Mensch-Computer-Interaktion. Dieser Beitrag bietet zun{\"a}chst eine kurze Einf{\"u}hrung in die Forschungsmethodik der MCI und gibt einen Einblick in die Forschungsaktivit{\"a}ten zu diesem Thema an den M{\"u}nchner Universit{\"a}ten. Im Fokus stehen hierbei Arbeiten zu {\"o}ffentlichen Bildschirmen, Blickinteraktion im {\"o}ffentlichen Raum, sowie die Entwicklung sicherer und gleichzeitig benutzbarer Authentifizierungsverfahren.},
doi = {10.1007/978-3-662-54712-0_2},
timestamp = {2017.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/koch2017informatikmuenchen.pdf},
}
L. Trotter, C. Mai, and F. Alt. Carsketch: a collaborative sketching table with self-propelled tangible objects for automotive applications. In Proceedings of the 9th international conference on automotive user interfaces and interactive vehicular applications adjunct (AutomotiveUI ’17), ACM, New York, NY, USA, 2017, p. 126–130. doi:10.1145/3131726.3131749
[BibTeX] [Abstract] [PDF]
We present CarSketch, a concept and prototype of a collaborative sketching table that supports interdisciplinary development teams during the early development phase of driver assistance systems. Due to the high costs caused by the use of physical prototypes, simulation is a common approach. Yet, the operation of state-of-the-art simulations is restricted to specialists, leaving the majority of stakeholders as passive observers. Our system for a collaborative and multi-perspective communication tool enables all participants to interact with the simulation. In particular, it (1) structures the ideation and development by providing a distraction-free environment with an easy-to-use drawing interface, (2) which is used by self-propelled tangibles to monitor and influence the simulation. (3) Additional information is provided by personal augmentation and (4) the simulation can be replayed in an immersive 3D environment. We expect the tool to be useful for multidisciplinary teams in fostering the ideation phase and finding conceptual mistakes more efficiently.
@InProceedings{trotter2017autouiadj,
author = {Trotter, Ludwig and Mai, Christian and Alt, Florian},
title = {CarSketch: A Collaborative Sketching Table with Self-Propelled Tangible Objects for Automotive Applications},
booktitle = {Proceedings of the 9th International Conference on Automotive User Interfaces and Interactive Vehicular Applications Adjunct},
year = {2017},
series = {AutomotiveUI '17},
pages = {126--130},
address = {New York, NY, USA},
publisher = {ACM},
note = {trotter2017autouiadj},
abstract = {We present CarSketch, a concept and prototype of a collaborative sketching table that supports interdisciplinary development teams during the early development phase of driver assistance systems. Due to the high costs caused by the use of physical prototypes, simulation is a common approach. Yet, the operation of state-of-the-art simulations is restricted to specialists, leaving the majority of stakeholders as passive observers. Our system for a collaborative and multi-perspective communication tool enables all participants to interact with the simulation. In particular, it (1) structures the ideation and development by providing a distraction-free environment with an easy-to-use drawing interface, (2) which is used by self-propelled tangibles to monitor and influence the simulation. (3) Additional information is provided by personal augmentation and (4) the simulation can be replayed in an immersive 3D environment. We expect the tool to be useful for multidisciplinary teams in fostering the ideation phase and finding conceptual mistakes more efficiently.},
acmid = {3131749},
doi = {10.1145/3131726.3131749},
isbn = {978-1-4503-5151-5},
keywords = {Automotive, collaborative work, simulation},
location = {Oldenburg, Germany},
numpages = {5},
timestamp = {2017.09.22},
url = {http://www.florian-alt.org/unibw/wp-content/publications/trotter2017autouiadj.pdf},
}
A. Colley, J. Häkkilä, B. Pfleging, and F. Alt. A design space for external displays on cars. In Proceedings of the 9th international conference on automotive user interfaces and interactive vehicular applications adjunct (AutomotiveUI ’17), ACM, New York, NY, USA, 2017, p. 146–151. doi:10.1145/3131726.3131760
[BibTeX] [Abstract] [PDF]
The exterior surfaces of cars provide so far unutilized opportunities for information display. The exploitation of this space is enabled by current advances in display technologies combined with increased sensor integration, computing power, and connectivity in vehicles. With this motivation, we present a framework, mapping the design space for external vehicle displays. The audience for the displayed information may be other road users, pedestrians, or autonomous systems. This design direction is particularly interesting in the future, as the current direction towards driverless vehicles may be an enabler for increased separation, redesign, and repurposing of vehicle interior and exterior surfaces.
@inproceedings{colley2017autoui,
  abstract = {The exterior surfaces of cars provide so far unutilized opportunities for information display. The exploitation of this space is enabled by current advances in display technologies combined with increased sensor integration, computing power, and connectivity in vehicles. With this motivation, we present a framework, mapping the design space for external vehicle displays. The audience for the displayed information may be other road users, pedestrians, or autonomous systems. This design direction is particularly interesting in the future, as the current direction towards driverless vehicles may be an enabler for increased separation, redesign, and repurposing of vehicle interior and exterior surfaces.},
  acmid = {3131760},
  address = {New York, NY, USA},
  author = {Colley, Ashley and H\"{a}kkil\"{a}, Jonna and Pfleging, Bastian and Alt, Florian},
  booktitle = {Proceedings of the 9th International Conference on Automotive User Interfaces and Interactive Vehicular Applications Adjunct},
  doi = {10.1145/3131726.3131760},
  isbn = {978-1-4503-5151-5},
  keywords = {Automotive UI, cars, design space, interactive surfaces, public displays},
  location = {Oldenburg, Germany},
  note = {colley2017autoui},
  numpages = {6},
  pages = {146--151},
  publisher = {ACM},
  series = {AutomotiveUI '17},
  timestamp = {2017.09.22},
  title = {A Design Space for External Displays on Cars},
  url = {http://www.florian-alt.org/unibw/wp-content/publications/colley2017autoui.pdf},
  year = {2017},
}
M. Braun, N. Broy, B. Pfleging, and F. Alt. A design space for conversational in-vehicle information systems. In Proceedings of the 19th international conference on human-computer interaction with mobile devices and services (MobileHCI ’17), ACM, New York, NY, USA, 2017, p. 79:1–79:8. doi:10.1145/3098279.3122122
[BibTeX] [Abstract] [PDF]
In this paper we chart a design space for conversational in-vehicle information systems (IVIS). Our work is motivated by the proliferation of speech interfaces in our everyday life, which have already found their way into consumer electronics and will most likely become pervasive in future cars. Our design space is based on expert interviews as well as a comprehensive literature review. We present five core dimensions – assistant, position, dialog design, system capabilities, and driver state – and show in an initial study how these dimensions affect the design of a prototypical IVIS. Design spaces have paved the way for much of the work done in HCI including but not limited to areas such as input and pointing devices, smart phones, displays, and automotive UIs. In a similar way, we expect our design space to aid practitioners in designing future IVIS but also researchers as they explore this young area of research.
@InProceedings{braun2017mobilehciadj,
author = {Braun, Michael and Broy, Nora and Pfleging, Bastian and Alt, Florian},
title = {A Design Space for Conversational In-vehicle Information Systems},
booktitle = {Proceedings of the 19th International Conference on Human-Computer Interaction with Mobile Devices and Services},
year = {2017},
series = {MobileHCI '17},
pages = {79:1--79:8},
address = {New York, NY, USA},
publisher = {ACM},
note = {braun2017mobilehciadj},
abstract = {In this paper we chart a design space for conversational in-vehicle information systems (IVIS). Our work is motivated by the proliferation of speech interfaces in our everyday life, which have already found their way into consumer electronics and will most likely become pervasive in future cars. Our design space is based on expert interviews as well as a comprehensive literature review. We present five core dimensions - assistant, position, dialog design, system capabilities, and driver state - and show in an initial study how these dimensions affect the design of a prototypical IVIS. Design spaces have paved the way for much of the work done in HCI including but not limited to areas such as input and pointing devices, smart phones, displays, and automotive UIs. In a similar way, we expect our design space to aid practitioners in designing future IVIS but also researchers as they explore this young area of research.},
acmid = {3122122},
articleno = {79},
doi = {10.1145/3098279.3122122},
isbn = {978-1-4503-5075-4},
keywords = {automotive user interfaces, design space, natural language interfaces, speech interaction},
location = {Vienna, Austria},
numpages = {8},
timestamp = {2017.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2017mobilehciadj.pdf},
}
V. Gentile, M. Khamis, S. Sorce, and F. Alt. They are looking at me! understanding how audience presence impacts on public display users. In Proceedings of the 6th international symposium on pervasive displays (PerDis ’17), ACM, New York, NY, USA, 2017. doi:10.1145/3078810.3078822
[BibTeX] [Abstract] [PDF]
It is well known from prior work, that people interacting as well as attending to a public display attract further people to interact. This behavior is commonly referred to as the honeypot effect. At the same time, there are often situations where an audience is present in the vicinity of a public display that does not actively engage or pay attention to the display or an approaching user. However, it is largely unknown how such a passive audience impacts on users or people who intend to interact. In this paper, we investigate the influence of a passive audience on the engagement of people with a public display. In more detail, we report on the deployment of a display in a public space. We collected and analyzed video logs to understand how people react to passive audience in the vicinity of public displays. We found an influence on where interacting users position themselves relative to both display and passive audience as well as on their behavior. Our findings are valuable for display providers and space owners who want to maximize the display’s benefits.
@InProceedings{gentile2017perdis,
author = {Gentile, Vito and Khamis, Mohamed and Sorce, Salvatore and Alt, Florian},
title = {They are looking at me! Understanding how Audience Presence Impacts on Public Display Users},
booktitle = {Proceedings of the 6th International Symposium on Pervasive Displays},
year = {2017},
series = {PerDis '17},
address = {New York, NY, USA},
publisher = {ACM},
note = {gentile2017perdis},
abstract = {It is well known from prior work, that people interacting as well as attending to a public display attract further people to interact. This behavior is commonly referred to as the honeypot effect. At the same time, there are often situations where an audience is present in the vicinity of a public display that does not actively engage or pay attention to the display or an approaching user. However, it is largely unknown how such a passive audience impacts on users or people who intend to interact. In this paper, we investigate the influence of a passive audience on the engagement of people with a public display. In more detail, we report on the deployment of a display in a public space. We collected and analyzed video logs to understand how people react to passive audience in the vicinity of public displays. We found an influence on where interacting users position themselves relative to both display and passive audience as well as on their behavior. Our findings are valuable for display providers and space owners who want to maximize the display's benefits.},
acmid = {3078822},
doi = {10.1145/3078810.3078822},
isbn = {978-1-4503-5045-7},
location = {Lugano, Switzerland},
numpages = {9},
timestamp = {2017.06.06},
url = {http://www.florian-alt.org/unibw/wp-content/publications/gentile2017perdis.pdf},
}
D. Huber, D. Buschek, and F. Alt. Don’t leave: combining sensing technology and second screens to enhance the user experience with tv content. In Proceedings of the 2017 acm international conference on interactive experiences for tv and online video (TVX ’17), ACM, New York, NY, USA, 2017, p. 115–121. doi:10.1145/3077548.3077561
[BibTeX] [Abstract] [PDF]
In this paper we explore how the use of sensing technologies can enhance people’s experience during perceiving TV content. The work is motivated by an increasing number of sensors (such as Kinect) that find their way into living rooms. Such sensors allow the behavior of viewers to be analyzed, hence providing the opportunity to instantly react to this behavior. The particular idea we explore in our work is how a second screen app triggered by the viewer’s behavior can be designed to make them re-engage with the TV content. At the outset of our work we conducted a survey (N=411) to assess viewers’ activities while watching TV. Based on the findings we implemented a Kinect-based system to detect these activities and connected it with a playful second screen app. We then conducted a field evaluation (N=20) where we compared (a) four hints to direct users’ attention to the second screen app and (b) four types of second screen content requiring different levels of engagement. We conclude with implications for both practitioners and researchers concerned with interactive TV
@inproceedings{huber2017tvx,
  abstract = {In this paper we explore how the use of sensing technologies can enhance people's experience during perceiving TV content. The work is motivated by an increasing number of sensors (such as Kinect) that find their way into living rooms. Such sensors allow the behavior of viewers to be analyzed, hence providing the opportunity to instantly react to this behavior. The particular idea we explore in our work is how a second screen app triggered by the viewer's behavior can be designed to make them re-engage with the TV content. At the outset of our work we conducted a survey (N=411) to assess viewers' activities while watching TV. Based on the findings we implemented a Kinect-based system to detect these activities and connected it with a playful second screen app. We then conducted a field evaluation (N=20) where we compared (a) four hints to direct users' attention to the second screen app and (b) four types of second screen content requiring different levels of engagement. We conclude with implications for both practitioners and researchers concerned with interactive TV},
  acmid = {3077561},
  address = {New York, NY, USA},
  author = {Huber, Daniela and Buschek, Daniel and Alt, Florian},
  booktitle = {Proceedings of the 2017 ACM International Conference on Interactive Experiences for TV and Online Video},
  doi = {10.1145/3077548.3077561},
  isbn = {978-1-4503-4529-3},
  location = {Hilversum, The Netherlands},
  note = {huber2017tvx},
  numpages = {7},
  pages = {115--121},
  publisher = {ACM},
  series = {TVX '17},
  timestamp = {2017.05.24},
  title = {Don't Leave: Combining Sensing Technology and Second Screens to Enhance the User Experience with TV Content},
  url = {http://www.florian-alt.org/unibw/wp-content/publications/huber2017tvx.pdf},
  year = {2017},
}
M. Eiband, M. Khamis, E. von Zezschwitz, H. Hussmann, and F. Alt. Understanding shoulder surfing in the wild: stories from users and observers. In Proceedings of the 2017 chi conference on human factors in computing systems (CHI ’17), ACM, New York, NY, USA, 2017, p. 4254–4265. doi:10.1145/3025453.3025636
[BibTeX] [Abstract] [PDF]
Research has brought forth a variety of authentication systems to mitigate observation attacks. However, there is little work about shoulder surfing situations in the real world. We present the results of a user survey (N=174) in which we investigate actual stories about shoulder surfing on mobile devices from both users and observers. Our analysis indicates that shoulder surfing mainly occurs in an opportunistic, non-malicious way. It usually does not have serious consequences, but evokes negative feelings for both parties, resulting in a variety of coping strategies. Observed data was personal in most cases and ranged from information about interests and hobbies to login data and intimate details about third persons and relationships. Thus, our work contributes evidence for shoulder surfing in the real world and informs implications for the design of privacy protection mechanisms.
@InProceedings{eiband2017chi,
author = {Eiband, Malin and Khamis, Mohamed and von Zezschwitz, Emanuel and Hussmann, Heinrich and Alt, Florian},
title = {Understanding Shoulder Surfing in the Wild: Stories from Users and Observers},
booktitle = {Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems},
year = {2017},
series = {CHI '17},
pages = {4254--4265},
address = {New York, NY, USA},
publisher = {ACM},
note = {eiband2017chi},
abstract = {Research has brought forth a variety of authentication systems to mitigate observation attacks. However, there is little work about shoulder surfing situations in the real world. We present the results of a user survey (N=174) in which we investigate actual stories about shoulder surfing on mobile devices from both users and observers. Our analysis indicates that shoulder surfing mainly occurs in an opportunistic, non-malicious way. It usually does not have serious consequences, but evokes negative feelings for both parties, resulting in a variety of coping strategies. Observed data was personal in most cases and ranged from information about interests and hobbies to login data and intimate details about third persons and relationships. Thus, our work contributes evidence for shoulder surfing in the real world and informs implications for the design of privacy protection mechanisms.},
acmid = {3025636},
doi = {10.1145/3025453.3025636},
isbn = {978-1-4503-4655-9},
keywords = {mobile devices, privacy, shoulder surfing},
location = {Denver, CO, USA},
numpages = {12},
timestamp = {2017.05.12},
url = {http://www.florian-alt.org/unibw/wp-content/publications/eiband2017chi.pdf},
}
Y. Abdelrahman, M. Khamis, S. Schneegass, and F. Alt. Stay cool! understanding thermal attacks on mobile-based user authentication. In Proceedings of the 35th annual acm conference on human factors in computing systems (CHI ’17), ACM, New York, NY, USA, 2017. doi:10.1145/3025453.3025461
[BibTeX] [Abstract] [PDF] [Video]
PINs and patterns remain among the most widely used knowledge-based authentication schemes. As thermal cameras become ubiquitous and affordable, we foresee a new form of threat to user privacy on mobile devices. Thermal cameras allow performing thermal attacks, where heat traces, resulting from authentication, can be used to reconstruct passwords. In this work we investigate in details the viability of exploiting thermal imaging to infer PINs and patterns on mobile devices. We present a study (N=18) where we evaluated how properties of PINs and patterns influence their thermal attacks resistance. We found that thermal attacks are indeed viable on mobile devices; overlapping patterns significantly decrease successful thermal attack rate from 100% to 16.67%, while PINs remain vulnerable (>72% success rate) even with duplicate digits. We conclude by recommendations for users and designers of authentication schemes on how to resist thermal attacks.
@InProceedings{abdelrahman2017chi,
author = {Abdelrahman, Yomna and Khamis, Mohamed and Schneegass, Stefan and Alt, Florian},
title = {Stay Cool! Understanding Thermal Attacks on Mobile-based User Authentication},
booktitle = {Proceedings of the 35th Annual ACM Conference on Human Factors in Computing Systems},
year = {2017},
series = {CHI '17},
address = {New York, NY, USA},
publisher = {ACM},
note = {abdelrahman2017chi},
abstract = {PINs and patterns remain among the most widely used knowledge-based authentication schemes. As thermal cameras become ubiquitous and affordable, we foresee a new form of threat to user privacy on mobile devices. Thermal cameras allow performing thermal attacks, where heat traces, resulting from authentication, can be used to reconstruct passwords. In this work we investigate in details the viability of exploiting thermal imaging to infer PINs and patterns on mobile devices. We present a study (N=18) where we evaluated how properties of PINs and patterns influence their thermal attacks resistance. We found that thermal attacks are indeed viable on mobile devices; overlapping patterns significantly decrease successful thermal attack rate from 100\% to 16.67\%, while PINs remain vulnerable (>72\% success rate) even with duplicate digits. We conclude by recommendations for users and designers of authentication schemes on how to resist thermal attacks.},
doi = {10.1145/3025453.3025461},
isbn = {978-1-4503-4655-9},
location = {Denver, CO, USA},
timestamp = {2017.05.12},
url = {http://www.florian-alt.org/unibw/wp-content/publications/abdelrahman2017chi.pdf},
video = {abdelrahman2017chi},
}
M. Hassib, M. Pfeiffer, S. Schneegass, M. Rohs, and F. Alt. Emotion actuator: embodied emotional feedback through electroencephalography and electrical muscle stimulation. In Proceedings of the 2017 chi conference on human factors in computing systems (CHI ’17), ACM, New York, NY, USA, 2017, p. 6133–6146. doi:10.1145/3025453.3025953
[BibTeX] [Abstract] [PDF]
The human body reveals emotional and bodily states through measurable signals, such as body language and electroencephalography. However, such manifestations are difficult to communicate to others remotely. We propose EmotionActuator, a proof-of-concept system to investigate the transmission of emotional states in which the recipient performs emotional gestures to understand and interpret the state of the sender.We call this kind of communication embodied emotional feedback, and present a prototype implementation. To realize our concept we chose four emotional states: amused, sad, angry, and neutral. We designed EmotionActuator through a series of studies to assess emotional classification via EEG, and create an EMS gesture set by comparing composed gestures from the literature to sign-language gestures. Through a final study with the end-to-end prototype interviews revealed that participants like implicit sharing of emotions and find the embodied output to be immersive, but want to have control over shared emotions and with whom. This work contributes a proof of concept system and set of design recommendations for designing embodied emotional feedback systems.
@InProceedings{hassib2017chi3,
author = {Hassib, Mariam and Pfeiffer, Max and Schneegass, Stefan and Rohs, Michael and Alt, Florian},
title = {Emotion Actuator: Embodied Emotional Feedback Through Electroencephalography and Electrical Muscle Stimulation},
booktitle = {Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems},
year = {2017},
series = {CHI '17},
pages = {6133--6146},
address = {New York, NY, USA},
publisher = {ACM},
note = {hassib2017chi3},
abstract = {The human body reveals emotional and bodily states through measurable signals, such as body language and electroencephalography. However, such manifestations are difficult to communicate to others remotely. We propose EmotionActuator, a proof-of-concept system to investigate the transmission of emotional states in which the recipient performs emotional gestures to understand and interpret the state of the sender.We call this kind of communication embodied emotional feedback, and present a prototype implementation. To realize our concept we chose four emotional states: amused, sad, angry, and neutral. We designed EmotionActuator through a series of studies to assess emotional classification via EEG, and create an EMS gesture set by comparing composed gestures from the literature to sign-language gestures. Through a final study with the end-to-end prototype interviews revealed that participants like implicit sharing of emotions and find the embodied output to be immersive, but want to have control over shared emotions and with whom. This work contributes a proof of concept system and set of design recommendations for designing embodied emotional feedback systems.},
acmid = {3025953},
doi = {10.1145/3025453.3025953},
isbn = {978-1-4503-4655-9},
keywords = {affect display, affective computing, eeg, emotion, emotion sharing, ems},
location = {Denver, CO, USA},
numpages = {14},
timestamp = {2017.05.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/hassib2017chi3.pdf},
}
M. Hassib, S. Schneegass, P. Eiglsperger, N. Henze, A. Schmidt, and F. Alt. Engagemeter: a system for implicit audience engagement sensing using electroencephalography. In Proceedings of the 2017 chi conference on human factors in computing systems (CHI ’17), ACM, New York, NY, USA, 2017, p. 5114–5119. doi:10.1145/3025453.3025669
[BibTeX] [Abstract] [PDF]
Obtaining information about audience engagement in presentations is a valuable asset for presenters in many domains. Prior literature mostly utilized explicit methods of collecting feedback which induce distractions, add workload on audience and do not provide objective information to presenters. We present EngageMeter – a system that allows fine-grained information on audience engagement to be obtained implicitly from multiple brain-computer interfaces (BCI) and to be fed back to presenters for real time and post-hoc access. Through evaluation during an HCI conference (Naudience=11, Npresenters=3) we found that EngageMeter provides value to presenters (a) in real-time, since it allows reacting to current engagement scores by changing tone or adding pauses, and (b) in post-hoc, since presenters can adjust their slides and embed extra elements. We discuss how EngageMeter can be used in collocated and distributed audience sensing as well as how it can aid presenters in long term use.
@InProceedings{hassib2017chi2,
author = {Hassib, Mariam and Schneegass, Stefan and Eiglsperger, Philipp and Henze, Niels and Schmidt, Albrecht and Alt, Florian},
title = {EngageMeter: A System for Implicit Audience Engagement Sensing Using Electroencephalography},
booktitle = {Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems},
year = {2017},
series = {CHI '17},
pages = {5114--5119},
address = {New York, NY, USA},
publisher = {ACM},
note = {hassib2017chi2},
abstract = {Obtaining information about audience engagement in presentations is a valuable asset for presenters in many domains. Prior literature mostly utilized explicit methods of collecting feedback which induce distractions, add workload on audience and do not provide objective information to presenters. We present EngageMeter - a system that allows fine-grained information on audience engagement to be obtained implicitly from multiple brain-computer interfaces (BCI) and to be fed back to presenters for real time and post-hoc access. Through evaluation during an HCI conference (Naudience=11, Npresenters=3) we found that EngageMeter provides value to presenters (a) in real-time, since it allows reacting to current engagement scores by changing tone or adding pauses, and (b) in post-hoc, since presenters can adjust their slides and embed extra elements. We discuss how EngageMeter can be used in collocated and distributed audience sensing as well as how it can aid presenters in long term use.},
acmid = {3025669},
doi = {10.1145/3025453.3025669},
isbn = {978-1-4503-4655-9},
keywords = {audience feedback, bci, eeg, physiological sensing},
location = {Denver, CO, USA},
numpages = {6},
timestamp = {2017.05.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/hassib2017chi2.pdf},
}
M. Hassib, D. Buschek, P. W. Wozniak, and F. Alt. Heartchat: heart rate augmented mobile chat to support empathy and awareness. In Proceedings of the 2017 chi conference on human factors in computing systems (CHI ’17), ACM, New York, NY, USA, 2017, p. 2239–2251. doi:10.1145/3025453.3025758
[BibTeX] [Abstract] [PDF]
Textual communication via mobile phones suffers from a lack of context and emotional awareness. We present a mobile chat application, HeartChat, which integrates heart rate as a cue to increase awareness and empathy. Through a literature review and a focus group, we identified design dimensions important for heart rate augmented chats. We created three concepts showing heart rate per message, in real-time, or sending it explicitly. We tested our system in a two week in-the-wild study with 14 participants (7 pairs). Interviews and questionnaires showed that HeartChat supports empathy between people, in particular close friends and partners. Sharing heart rate helped them to implicitly understand each other’s context (e.g. location, physical activity) and emotional state, and sparked curiosity on special occasions. We discuss opportunities, challenges, and design implications for enriching mobile chats with physiological sensing.
@InProceedings{hassib2017chi1,
author = {Hassib, Mariam and Buschek, Daniel and Wozniak, Pawe\l W. and Alt, Florian},
title = {HeartChat: Heart Rate Augmented Mobile Chat to Support Empathy and Awareness},
booktitle = {Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems},
year = {2017},
series = {CHI '17},
pages = {2239--2251},
address = {New York, NY, USA},
publisher = {ACM},
note = {hassib2017chi1},
abstract = {Textual communication via mobile phones suffers from a lack of context and emotional awareness. We present a mobile chat application, HeartChat, which integrates heart rate as a cue to increase awareness and empathy. Through a literature review and a focus group, we identified design dimensions important for heart rate augmented chats. We created three concepts showing heart rate per message, in real-time, or sending it explicitly. We tested our system in a two week in-the-wild study with 14 participants (7 pairs). Interviews and questionnaires showed that HeartChat supports empathy between people, in particular close friends and partners. Sharing heart rate helped them to implicitly understand each other's context (e.g. location, physical activity) and emotional state, and sparked curiosity on special occasions. We discuss opportunities, challenges, and design implications for enriching mobile chats with physiological sensing.},
acmid = {3025758},
doi = {10.1145/3025453.3025758},
isbn = {978-1-4503-4655-9},
keywords = {affective computing, heart rate, instant messaging, physiological sensing},
location = {Denver, CO, USA},
numpages = {13},
timestamp = {2017.05.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/hassib2017chi1.pdf},
}
D. Buschek and F. Alt. Probui: generalising touch target representations to enable declarative gesture definition for probabilistic guis. In Proceedings of the 2017 chi conference on human factors in computing systems (CHI ’17), ACM, New York, NY, USA, 2017, p. 4640–4653. doi:10.1145/3025453.3025502
[BibTeX] [Abstract] [PDF]
We present ProbUI, a mobile touch GUI framework that merges ease of use of declarative gesture definition with the benefits of probabilistic reasoning. It helps developers to handle uncertain input and implement feedback and GUI adaptations. ProbUI replaces today’s static target models (bounding boxes) with probabilistic gestures (“bounding behaviours”). It is the first touch GUI framework to unite concepts from three areas of related work: 1) Developers declaratively define touch behaviours for GUI targets. As a key insight, the declarations imply simple probabilistic models (HMMs with 2D Gaussian emissions). 2) ProbUI derives these models automatically to evaluate users’ touch sequences. 3) It then infers intended behaviour and target. Developers bind callbacks to gesture progress, completion, and other conditions. We show ProbUI’s value by implementing existing and novel widgets, and report developer feedback from a survey and a lab study.
@InProceedings{buschek2017chi,
author = {Buschek, Daniel and Alt, Florian},
title = {ProbUI: Generalising Touch Target Representations to Enable Declarative Gesture Definition for Probabilistic GUIs},
booktitle = {Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems},
year = {2017},
series = {CHI '17},
pages = {4640--4653},
address = {New York, NY, USA},
publisher = {ACM},
note = {buschek2017chi},
abstract = {We present ProbUI, a mobile touch GUI framework that merges ease of use of declarative gesture definition with the benefits of probabilistic reasoning. It helps developers to handle uncertain input and implement feedback and GUI adaptations. ProbUI replaces today's static target models (bounding boxes) with probabilistic gestures ("bounding behaviours"). It is the first touch GUI framework to unite concepts from three areas of related work: 1) Developers declaratively define touch behaviours for GUI targets. As a key insight, the declarations imply simple probabilistic models (HMMs with 2D Gaussian emissions). 2) ProbUI derives these models automatically to evaluate users' touch sequences. 3) It then infers intended behaviour and target. Developers bind callbacks to gesture progress, completion, and other conditions. We show ProbUI's value by implementing existing and novel widgets, and report developer feedback from a survey and a lab study.},
acmid = {3025502},
doi = {10.1145/3025453.3025502},
isbn = {978-1-4503-4655-9},
keywords = {gui framework, probabilistic modelling, touch gestures},
location = {Denver, CO, USA},
numpages = {14},
timestamp = {2017.05.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2017chi.pdf},
}
M. Khamis, R. Hasholzner, A. Bulling, and F. Alt. Gtmopass: two-factor authentication on public displays using gazetouch passwords and personal mobile devices. In Proceedings of the 6th international symposium on pervasive displays (PerDis ’17), ACM, New York, NY, USA, 2017. doi:10.1145/3078810.3078815
[BibTeX] [Abstract] [PDF]
As public displays continue to deliver increasingly private and personalized content, there is a need to ensure that only the legitimate users can access private information in sensitive contexts. While public displays can adopt similar authentication concepts like those used on public terminals (e.g., ATMs), authentication in public is subject to a number of risks. Namely, adversaries can uncover a user’s password through (1) shoulder surfing, (2) thermal attacks, or (3) smudge attacks. To address this problem we propose GTmoPass, an authentication architecture that enables Multi-factor user authentication on public displays. The first factor is a knowledge-factor: we employ a shoulder-surfing resilient multimodal scheme that combines gaze and touch input for password entry. The second factor is a possession-factor: users utilize their personal mobile devices, on which they enter the password. Credentials are securely transmitted to a server via Bluetooth beacons. We describe the implementation of GTmoPass and report on an evaluation of its usability and security, which shows that although authentication using GTmoPass is slightly slower than traditional methods, it protects against the three aforementioned threats.
@InProceedings{khamis2017perdis,
author = {Khamis, Mohamed and Hasholzner, Regina and Bulling, Andreas and Alt, Florian},
title = {GTmoPass: Two-factor Authentication on Public Displays Using GazeTouch Passwords and Personal Mobile Devices},
booktitle = {Proceedings of the 6th International Symposium on Pervasive Displays},
year = {2017},
series = {PerDis '17},
address = {New York, NY, USA},
publisher = {ACM},
note = {khamis2017perdis},
abstract = {As public displays continue to deliver increasingly private and personalized content, there is a need to ensure that only the legitimate users can access private information in sensitive contexts. While public displays can adopt similar authentication concepts like those used on public terminals (e.g., ATMs), authentication in public is subject to a number of risks. Namely, adversaries can uncover a user's password through (1) shoulder surfing, (2) thermal attacks, or (3) smudge attacks. To address this problem we propose GTmoPass, an authentication architecture that enables Multi-factor user authentication on public displays. The first factor is a knowledge-factor: we employ a shoulder-surfing resilient multimodal scheme that combines gaze and touch input for password entry. The second factor is a possession-factor: users utilize their personal mobile devices, on which they enter the password. Credentials are securely transmitted to a server via Bluetooth beacons. We describe the implementation of GTmoPass and report on an evaluation of its usability and security, which shows that although authentication using GTmoPass is slightly slower than traditional methods, it protects against the three aforementioned threats.},
acmid = {3078815},
doi = {10.1145/3078810.3078815},
isbn = {978-1-4503-5045-7},
location = {Lugano, Switzerland},
numpages = {9},
owner = {florian},
timestamp = {2017.05.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2017perdis.pdf},
}
R. Linke, T. Kothe, and F. Alt. Tabooga: a hybrid learning app to support children’s reading motivation. In Proceedings of the 2017 conference on interaction design and children (IDC ’17), ACM, New York, NY, USA, 2017, p. 278–285. doi:10.1145/3078072.3079712
[BibTeX] [Abstract] [PDF]
@InProceedings{linke2017idc,
author = {Linke, Rebecca and Kothe, Tina and Alt, Florian},
title = {TaBooGa: A Hybrid Learning App to Support Children's Reading Motivation},
booktitle = {Proceedings of the 2017 Conference on Interaction Design and Children},
year = {2017},
series = {IDC '17},
pages = {278--285},
address = {New York, NY, USA},
publisher = {ACM},
note = {linke2017idc},
acmid = {3079712},
doi = {10.1145/3078072.3079712},
isbn = {978-1-4503-4921-5},
keywords = {book-app, hybrid, literature, motivation, reading, tangible},
location = {Stanford, California, USA},
numpages = {8},
timestamp = {2017.05.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/linke2017idc.pdf},
}
S. Oberhuber, T. Kothe, S. Schneegass, and F. Alt. Augmented games: exploring design opportunities in ar settings with children. In Proceedings of the 2017 conference on interaction design and children (IDC ’17), ACM, New York, NY, USA, 2017, p. 371–377. doi:10.1145/3078072.3079734
[BibTeX] [Abstract] [PDF]
In this paper we investigate how Augmented Reality (AR) technology influences children during creative content generation in playful settings. The work is motivated by the recent spread of AR and the fact that children get in touch with this technology through their smart phones very early on. To understand the consequences, we implemented an app for smart mobile devices that allows children to create treasure hunts using GPS coordinates and marker-based AR functionality. During a qualitative user study, we asked students (n=27) to create traditional (paper + art supplies) and digital (paper + art supplies + AR app) treasure hunts and compared the resulting games, among other metrics, in terms of complexity, length and types of media used. Whereas traditional treasure hunts were linear, centered around locations and delivered information with text only, digital treasure hunts were more complex, focused on visual aspects and frequently integrated storytelling.
@inproceedings{oberhuber2017idc,
  author    = {Oberhuber, Sascha and Kothe, Tina and Schneegass, Stefan and Alt, Florian},
  title     = {Augmented Games: Exploring Design Opportunities in AR Settings With Children},
  booktitle = {Proceedings of the 2017 Conference on Interaction Design and Children},
  series    = {IDC '17},
  year      = {2017},
  pages     = {371--377},
  numpages  = {7},
  location  = {Stanford, California, USA},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/3078072.3079734},
  acmid     = {3079734},
  isbn      = {978-1-4503-4921-5},
  keywords  = {AR, children, creativity, education, storytelling},
  abstract  = {In this paper we investigate how Augmented Reality (AR) technology influences children during creative content generation in playful settings. The work is motivated by the recent spread of AR and the fact that children get in touch with this technology through their smart phones very early on. To understand the consequences, we implemented an app for smart mobile devices that allows children to create treasure hunts using GPS coordinates and marker-based AR functionality. During a qualitative user study, we asked students (n=27) to create traditional (paper + art supplies) and digital (paper + art supplies + AR app) treasure hunts and compared the resulting games, among other metrics, in terms of complexity, length and types of media used. Whereas traditional treasure hunts were linear, centered around locations and delivered information with text only, digital treasure hunts were more complex, focused on visual aspects and frequently integrated storytelling.},
  note      = {oberhuber2017idc},
  timestamp = {2017.05.02},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/oberhuber2017idc.pdf},
}
M. Al Sada, M. Khamis, A. Kato, S. Sugano, T. Nakajima, and F. Alt. Challenges and opportunities of supernumerary robotic limbs. In Proceedings of the chi 2017 workshop on amplification and augmentation of human perception (amplify 2017) (Amplify ’17), New York, NY, USA, 2017.
[BibTeX] [Abstract] [PDF]
Recent advancements in robotics and wearables made it possible to augment humans with additional robotic limbs (e.g., extra pair of arms). However, these advances have been dispersed among different research communities with very little attention to the user’s perspective. In this work we take a first step to close this gap. We report on the results of two focus groups that uncovered expectations and concerns of potential users of Supernumerary Robotic Limbs (SRLs). There is a wide range of applications for SRLs within daily usage contexts, like enabling new perceptions, commuting and communication methods as well as enhancing existing ones. Yet, several requirements need to be met before SRLs can be widely adopted, such as multipurpose design and adequate sensory feedback. We discuss how these findings influence the design of future SRLs.
@InProceedings{alsada2017amplify,
author = {Al Sada, Mohammed and Khamis, Mohamed and Kato, Akira and Sugano, Shigeki and Nakajima, Tatsuo and Alt, Florian},
title = {Challenges and Opportunities of Supernumerary Robotic Limbs},
booktitle = {Proceedings of the CHI 2017 Workshop on Amplification and Augmentation of Human Perception (Amplify 2017)},
year = {2017},
series = {Amplify '17},
address = {New York, NY, USA},
note = {alsada2017amplify},
abstract = {Recent advancements in robotics and wearables made it possible to augment humans with additional robotic limbs (e.g., extra pair of arms). However, these advances have been dispersed among different research communities with very little attention to the user's perspective. In this work we take a first step to close this gap. We report on the results of two focus groups that uncovered expectations and concerns of potential users of Supernumerary Robotic Limbs (SRLs). There is a wide range of applications for SRLs within daily usage contexts, like enabling new perceptions, commuting and communication methods as well as enhancing existing ones. Yet, several requirements need to be met before SRLs can be widely adopted, such as multipurpose design and adequate sensory feedback. We discuss how these findings influence the design of future SRLs.},
location = {Denver, CO, USA},
timestamp = {2017.05.01},
}
O. Duerr, M. Khamis, D. Buschek, and F. Alt. Helpme: assisting older adults in performing tasks on mobile devices. In Proceedings of the chi 2017 workshop on designing mobile interactions for the ageing populations (), New York, NY, USA, 2017.
[BibTeX] [Abstract] [PDF]
Although mobile devices are becoming more ubiquitous, older adults have trouble catching up with the dynamics of technological innovation in smartphones. Most custom solutions for them rely on a proprietary UI with an extenuated number of interaction possibilities. While these solutions do help with basic tasks such as calling the right person, many of the benefits of having a smartphone are clearly dislodged. We introduce and evaluate a prototype, HelpMe, for older adult users who want to use more demanding Apps without external assistance. Through a prestudy we uncovered a set of behaviors that imply that the user needs assistance. By detecting these behaviors or by manual request, HelpMe overlays information that explain to the user what can be done on the current screen and what the different UI symbols resemble. We evaluated HelpMe in a subsequent study where we collected feedback and measured the workload. Our findings show that older adult users would benefit from HelpMe, and that it reduces the perceived workload.
@InProceedings{duerr2017olderadults,
author = {Duerr, Oliver and Khamis, Mohamed and Buschek, Daniel and Alt, Florian},
title = {HelpMe: Assisting Older Adults in Performing Tasks on Mobile Devices},
booktitle = {Proceedings of the CHI 2017 Workshop on Designing Mobile Interactions for the Ageing Populations},
year = {2017},
address = {New York, NY, USA},
note = {duerr2017olderadults},
abstract = {Although mobile devices are becoming more ubiquitous, older adults have trouble catching up with the dynamics of technological innovation in smartphones. Most custom solutions for them rely on a proprietary UI with an extenuated number of interaction possibilities. While these solutions do help with basic tasks such as calling the right person, many of the benefits of having a smartphone are clearly dislodged. We introduce and evaluate a prototype, HelpMe, for older adult users who want to use more demanding Apps without external assistance. Through a prestudy we uncovered a set of behaviors that imply that the user needs assistance. By detecting these behaviors or by manual request, HelpMe overlays information that explain to the user what can be done on the current screen and what the different UI symbols resemble. We evaluated HelpMe in a subsequent study where we collected feedback and measured the workload. Our findings show that older adult users would benefit from HelpMe, and that it reduces the perceived workload.},
location = {Denver, CO, USA},
timestamp = {2017.05.01},
}
C. George, M. Khamis, M. Burger, H. Schmidt, F. Alt, and H. Hussmann. Seamless and secure vr: adapting and evaluating established authentication systems for virtual reality. In Proceedings of the usable security mini conference 2017 (), Internet Society, San Diego, CA, USA, 2017.
[BibTeX] [Abstract] [PDF]
Virtual reality (VR) headsets are enabling a wide range of new opportunities for the user. For example, in the near future users may be able to visit virtual shopping malls and virtually join international conferences. These and many other scenarios pose new questions with regards to privacy and security, in particular authentication of users within the virtual environment. As a first step towards seamless VR authentication, this paper investigates the direct transfer of well-established concepts (PIN, Android unlock patterns) into VR. In a pilot study (N = 5) and a lab study (N = 25), we adapted existing mechanisms and evaluated their usability and security for VR. The results indicate that both PINs and patterns are well suited for authentication in VR. We found that the usability of both methods matched the performance known from the physical world. In addition, the private visual channel makes authentication harder to observe, indicating that authentication in VR using traditional concepts already achieves a good balance in the trade-off between usability and security. The paper contributes to a better understanding of authentication within VR environments, by providing the first investigation of established authentication methods within VR, and presents the base layer for the design of future authentication schemes, which are used in VR environments only.
@InProceedings{george2017usec,
author = {George, Ceenu and Khamis, Mohamed and Burger, Marinus and Schmidt, Henri and Alt, Florian and Hussmann, Heinrich},
title = {Seamless and Secure VR: Adapting and Evaluating Established Authentication Systems for Virtual Reality},
booktitle = {Proceedings of the Usable Security Mini Conference 2017},
year = {2017},
address = {San Diego, CA, USA},
publisher = {Internet Society},
note = {george2017usec},
abstract = {Virtual reality (VR) headsets are enabling a wide range of new opportunities for the user. For example, in the near future users may be able to visit virtual shopping malls and virtually join international conferences. These and many other scenarios pose new questions with regards to privacy and security, in particular authentication of users within the virtual environment. As a first step towards seamless VR authentication, this paper investigates the direct transfer of well-established concepts (PIN, Android unlock patterns) into VR. In a pilot study (N = 5) and a lab study (N = 25), we adapted existing mechanisms and evaluated their usability and security for VR. The results indicate that both PINs and patterns are well suited for authentication in VR. We found that the usability of both methods matched the performance known from the physical world. In addition, the private visual channel makes authentication harder to observe, indicating that authentication in VR using traditional concepts already achieves a good balance in the trade-off between usability and security. The paper contributes to a better understanding of authentication within VR environments, by providing the first investigation of established authentication methods within VR, and presents the base layer for the design of future authentication schemes, which are used in VR environments only.},
owner = {florian},
timestamp = {2017.02.26},
url = {http://www.florian-alt.org/unibw/wp-content/publications/george2017usec.pdf},
}

### 2016

E. von Zezschwitz, M. Eiband, D. Buschek, S. Oberhuber, A. De Luca, F. Alt, and H. Hussmann. On quantifying the effective password space of grid-based unlock gestures. In Proceedings of the 15th international conference on mobile and ubiquitous multimedia (MUM ’16), ACM, New York, NY, USA, 2016, p. 201–212. doi:10.1145/3012709.3012729
[BibTeX] [Abstract] [PDF]
We present a similarity metric for Android unlock patterns to quantify the effective password space of user-defined gestures. Our metric is the first of its kind to reflect that users choose patterns based on human intuition and interest in geometric properties of the resulting shapes. Applying our metric to a dataset of 506 user-defined patterns reveals very similar shapes that only differ by simple geometric transformations such as rotation. This shrinks the effective password space by 66% and allows informed guessing attacks. Consequently, we present an approach to subtly nudge users to create more diverse patterns by showing background images and animations during pattern creation. Results from a user study (n = 496) show that applying such countermeasures can significantly increase pattern diversity. We conclude with implications for pattern choices and the design of enrollment processes.
@InProceedings{zezschwitz2016mum,
author = {von Zezschwitz, Emanuel and Eiband, Malin and Buschek, Daniel and Oberhuber, Sascha and De Luca, Alexander and Alt, Florian and Hussmann, Heinrich},
title = {On Quantifying the Effective Password Space of Grid-based Unlock Gestures},
booktitle = {Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia},
year = {2016},
series = {MUM '16},
pages = {201--212},
address = {New York, NY, USA},
publisher = {ACM},
note = {zezschwitz2016mum},
abstract = {We present a similarity metric for Android unlock patterns to quantify the effective password space of user-defined gestures. Our metric is the first of its kind to reflect that users choose patterns based on human intuition and interest in geometric properties of the resulting shapes. Applying our metric to a dataset of 506 user-defined patterns reveals very similar shapes that only differ by simple geometric transformations such as rotation. This shrinks the effective password space by 66\% and allows informed guessing attacks. Consequently, we present an approach to subtly nudge users to create more diverse patterns by showing background images and animations during pattern creation. Results from a user study (n = 496) show that applying such countermeasures can significantly increase pattern diversity. We conclude with implications for pattern choices and the design of enrollment processes.},
acmid = {3012729},
doi = {10.1145/3012709.3012729},
isbn = {978-1-4503-4860-7},
keywords = {metric, password space, security, similarity, unlock pattern, user selection},
location = {Rovaniemi, Finland},
numpages = {12},
timestamp = {2016.12.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/zezschwitz2016mum.pdf},
}
M. Khamis, L. Trotter, M. Tessmann, C. Dannhart, A. Bulling, and F. Alt. Eyevote in the wild: do users bother correcting system errors on public displays? In Proceedings of the 15th international conference on mobile and ubiquitous multimedia (MUM ’16), ACM, New York, NY, USA, 2016, p. 57–62. doi:10.1145/3012709.3012743
[BibTeX] [Abstract] [PDF]
Although recovering from errors is straightforward on most interfaces, public display systems pose very unique design challenges. Namely, public display users interact for very short amounts of times and are believed to abandon the display when interrupted or forced to deviate from the main task. To date, it is not well understood whether public display designers should enable users to correct errors (e.g. by asking users to confirm or giving them a chance correct their input), or aim for faster interaction and rely on other types of feedback to estimate errors. To close this gap, we conducted a field study where we investigated the users willingness to correct their input on public displays. We report on our findings from an in-the-wild deployment of a public gaze-based voting system where we intentionally evoked system errors to see if users correct them. We found that public display users are willing to correct system errors provided that the correction is fast and straightforward. We discuss how our findings influence the choice of interaction methods for public displays; interaction methods that are highly usable but suffer from low accuracy can still be effective if users can “undo” their interactions.
@inproceedings{khamis2016mum,
  author    = {Khamis, Mohamed and Trotter, Ludwig and Tessmann, Markus and Dannhart, Christina and Bulling, Andreas and Alt, Florian},
  title     = {EyeVote in the Wild: Do Users Bother Correcting System Errors on Public Displays?},
  booktitle = {Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia},
  series    = {MUM '16},
  year      = {2016},
  pages     = {57--62},
  numpages  = {6},
  location  = {Rovaniemi, Finland},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/3012709.3012743},
  acmid     = {3012743},
  isbn      = {978-1-4503-4860-7},
  keywords  = {gaze interaction, public displays, smooth pursuit, voting},
  abstract  = {Although recovering from errors is straightforward on most interfaces, public display systems pose very unique design challenges. Namely, public display users interact for very short amounts of times and are believed to abandon the display when interrupted or forced to deviate from the main task. To date, it is not well understood whether public display designers should enable users to correct errors (e.g. by asking users to confirm or giving them a chance correct their input), or aim for faster interaction and rely on other types of feedback to estimate errors. To close this gap, we conducted a field study where we investigated the users willingness to correct their input on public displays. We report on our findings from an in-the-wild deployment of a public gaze-based voting system where we intentionally evoked system errors to see if users correct them. We found that public display users are willing to correct system errors provided that the correction is fast and straightforward. We discuss how our findings influence the choice of interaction methods for public displays; interaction methods that are highly usable but suffer from low accuracy can still be effective if users can "undo" their interactions.},
  note      = {khamis2016mum},
  timestamp = {2016.12.15},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2016mum.pdf},
}
N. Broy, V. Lindner, and F. Alt. The s3d-ui designer: creating user interface prototypes for 3d displays. In Proceedings of the 15th international conference on mobile and ubiquitous multimedia (MUM ’16), ACM, New York, NY, USA, 2016, p. 49–55. doi:10.1145/3012709.3012727
[BibTeX] [Abstract] [PDF]
In this paper, we present the S3D-UI Designer – a tool to create prototypes for 3D displays. Stereoscopic (S3D) displays are quickly becoming popular beyond cinemas and home entertainments. S3D displays can today already be found in mobile phones, public displays, and car dashboards. A benefit of such UIs is that they can group and structure information in a way that makes them easily perceivable for the user. At the same time, prototyping these UIs is challenging, as with traditional techniques, UI elements cannot easily be rendered and positioned in 3D space. In contrast to professional 3D authoring tools, we present a tool targeted towards non-experts to quickly and easily sketch a S3D UI and instantly render it on a 3D display. We report on the design of the tool by means of a workshop and present an evaluation study with 26 participants assessing its usability.
@InProceedings{broy2016mum,
author = {Broy, Nora and Lindner, Verena and Alt, Florian},
title = {The S3D-UI Designer: Creating User Interface Prototypes for 3D Displays},
booktitle = {Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia},
year = {2016},
series = {MUM '16},
pages = {49--55},
address = {New York, NY, USA},
publisher = {ACM},
note = {broy2016mum},
abstract = {In this paper, we present the S3D-UI Designer --- a tool to create prototypes for 3D displays. Stereoscopic (S3D) displays are quickly becoming popular beyond cinemas and home entertainments. S3D displays can today already be found in mobile phones, public displays, and car dashboards. A benefit of such UIs is that they can group and structure information in a way that makes them easily perceivable for the user. At the same time, prototyping these UIs is challenging, as with traditional techniques, UI elements cannot easily be rendered and positioned in 3D space. In contrast to professional 3D authoring tools, we present a tool targeted towards non-experts to quickly and easily sketch a S3D UI and instantly render it on a 3D display. We report on the design of the tool by means of a workshop and present an evaluation study with 26 participants assessing its usability.},
acmid = {3012727},
doi = {10.1145/3012709.3012727},
isbn = {978-1-4503-4860-7},
keywords = {prototyping, stereoscopic 3D, user interfaces},
location = {Rovaniemi, Finland},
numpages = {7},
timestamp = {2016.12.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2016mum.pdf},
}
F. Alt, M. Mikusz, S. Schneegass, and A. Bulling. Memorability of cued-recall graphical passwords with saliency masks. In Proceedings of the 15th international conference on mobile and ubiquitous multimedia (MUM ’16), ACM, New York, NY, USA, 2016, p. 191–200. doi:10.1145/3012709.3012730
[BibTeX] [Abstract] [PDF]
@InProceedings{alt2016mum,
author = {Alt, Florian and Mikusz, Mateusz and Schneegass, Stefan and Bulling, Andreas},
title = {Memorability of Cued-Recall Graphical Passwords with Saliency Masks},
booktitle = {Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia},
year = {2016},
series = {MUM '16},
pages = {191--200},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2016mum},
acmid = {3012730},
doi = {10.1145/3012709.3012730},
isbn = {978-1-4503-4860-7},
keywords = {cued-recall graphical passwords, memorability, saliency masks, user authentication, user study},
location = {Rovaniemi, Finland},
numpages = {10},
timestamp = {2016.12.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2016mum.pdf},
}
Proceedings of the 15th international conference on mobile and ubiquitous multimedia. New York, NY, USA: ACM, 2016.
[BibTeX] [PDF]
@proceedings{alt2016mumproc,
  title     = {Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia},
  series    = {MUM '16},
  year      = {2016},
  location  = {Rovaniemi, Finland},
  publisher = {ACM},
  address   = {New York, NY, USA},
  isbn      = {978-1-4503-4860-7},
  note      = {alt2016mumproc},
  timestamp = {2016.12.14},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/alt2016mumproc.pdf},
}
F. Steinberger, P. Proppe, R. Schroeter, and F. Alt. Coastmaster: an ambient speedometer to gamify safe driving. In Proceedings of the 8th international conference on automotive user interfaces and interactive vehicular applications (Automotive’UI 16), ACM, New York, NY, USA, 2016, p. 83–90. doi:10.1145/3003715.3005412
[BibTeX] [Abstract] [PDF]
We present CoastMaster, a smartphone application that serves as an ambient speedometer and driving game display. Our work is motivated by the need to re-engage drivers in the driving task, e.g., in situations where manoeuvering the vehicle is straightforward and does not require high levels of engagement. CoastMaster supports drivers during speed limit changes by (a) re-engaging them in the driving task, and; (b) providing feedback on driving behaviour. In a simulator study (N=24), we compare a gamified and a non-gamified interface with regards to user experience, driving performance, and visual distraction. Our results indicate an increase in hedonic quality and driver engagement as well as a decrease in speed violations through the gamified condition. At the same time, the gamified version leads to longer glances towards the display suggesting visual distraction. Our study findings inform specific design recommendations for ambient interfaces and gamified driving.
@inproceedings{steinberger2016autoui,
  author    = {Steinberger, Fabius and Proppe, Patrick and Schroeter, Ronald and Alt, Florian},
  title     = {CoastMaster: An Ambient Speedometer to Gamify Safe Driving},
  booktitle = {Proceedings of the 8th International Conference on Automotive User Interfaces and Interactive Vehicular Applications},
  series    = {Automotive'UI 16},
  year      = {2016},
  pages     = {83--90},
  numpages  = {8},
  location  = {Ann Arbor, MI, USA},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/3003715.3005412},
  acmid     = {3005412},
  isbn      = {978-1-4503-4533-0},
  keywords  = {Ambient interface, design approach, distraction, gamification, interactive experience, vehicle-based apps},
  abstract  = {We present CoastMaster, a smartphone application that serves as an ambient speedometer and driving game display. Our work is motivated by the need to re-engage drivers in the driving task, e.g., in situations where manoeuvering the vehicle is straightforward and does not require high levels of engagement. CoastMaster supports drivers during speed limit changes by (a) re-engaging them in the driving task, and; (b) providing feedback on driving behaviour. In a simulator study (N=24), we compare a gamified and a non-gamified interface with regards to user experience, driving performance, and visual distraction. Our results indicate an increase in hedonic quality and driver engagement as well as a decrease in speed violations through the gamified condition. At the same time, the gamified version leads to longer glances towards the display suggesting visual distraction. Our study findings inform specific design recommendations for ambient interfaces and gamified driving.},
  note      = {steinberger2016autoui},
  timestamp = {2016.10.10},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/steinberger2016autoui.pdf},
}
M. Khamis, A. Klimczak, M. Reiss, F. Alt, and A. Bulling. Eyescout: active eye tracking for position and movement-independent gaze interaction with large public displays. In Proceedings of the 30th annual acm symposium on user interface software & technology (UIST ’17), ACM, New York, NY, USA, 2016. doi:10.1145/3126594.3126630
[BibTeX] [Abstract] [PDF]
While gaze holds a lot of promise for hands-free interaction with public displays, remote eye trackers with their confined tracking box restrict users to a single stationary position in front of the display. We present EyeScout, an active eye tracking system that combines an eye tracker mounted on a rail system with a computational method to automatically detect and align the tracker with the user’s lateral movement. EyeScout addresses key limitations of current gaze-enabled large public displays by offering two novel gaze-interaction modes for a single user: In “Walk then Interact” the user can walk up to an arbitrary position in front of the display and interact, while in “Walk and Interact” the user can interact even while on the move. We report on a user study that shows that EyeScout is well perceived by users, extends a public display’s sweet spot into a sweet line, and reduces gaze interaction kick-off time to 3.5 seconds – a 62% improvement over state of the art solutions. We discuss sample applications that demonstrate how EyeScout can enable position and movement-independent gaze interaction with large public displays.
@InProceedings{khamis2017uist,
author = {Khamis, Mohamed and Klimczak, Alexander and Reiss, Martin and Alt, Florian and Bulling, Andreas},
title = {EyeScout: Active Eye Tracking for Position and Movement-Independent Gaze Interaction with Large Public Displays},
booktitle = {Proceedings of the 30th Annual ACM Symposium on User Interface Software \& Technology},
year = {2017},
series = {UIST '17},
address = {New York, NY, USA},
publisher = {ACM},
note = {khamis2017uist},
abstract = {While gaze holds a lot of promise for hands-free interaction with public displays, remote eye trackers with their confined tracking box restrict users to a single stationary position in front of the display. We present EyeScout, an active eye tracking system that combines an eye tracker mounted on a rail system with a computational method to automatically detect and align the tracker with the user's lateral movement. EyeScout addresses key limitations of current gaze-enabled large public displays by offering two novel gaze-interaction modes for a single user: In "Walk then Interact" the user can walk up to an arbitrary position in front of the display and interact, while in "Walk and Interact" the user can interact even while on the move. We report on a user study that shows that EyeScout is well perceived by users, extends a public display's sweet spot into a sweet line, and reduces gaze interaction kick-off time to 3.5 seconds -- a 62\% improvement over state of the art solutions. We discuss sample applications that demonstrate how EyeScout can enable position and movement-independent gaze interaction with large public displays.},
acmid = {3126630},
doi = {10.1145/3126594.3126630},
isbn = {978-1-4503-4981-9/17/10},
location = {Quebec City, QC, Canada},
numpages = {12},
timestamp = {2016.10.05},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2017uist.pdf},
}
M. Khamis, O. Saltuk, A. Hang, K. Stolz, A. Bulling, and F. Alt. Textpursuits: using text for pursuits-based interaction and calibration on public displays. In Proceedings of the 2016 acm international joint conference on pervasive and ubiquitous computing (UbiComp ’16), ACM, New York, NY, USA, 2016, p. 274–285. doi:10.1145/2971648.2971679
[BibTeX] [Abstract] [PDF]
In this paper we show how reading text on large display can be used to enable gaze interaction in public space. Our research is motivated by the fact that much of the content on public displays includes text. Hence, researchers and practitioners could greatly benefit from users being able to spontaneously interact as well as to implicitly calibrate an eye tracker while simply reading this text. In particular, we adapt Pursuits, a technique that correlates users’ eye movements with moving on-screen targets. While prior work used abstract objects or dots as targets, we explore the use of Pursuits with text (read-and-pursue). Thereby we address the challenge that eye movements performed while reading interfere with the pursuit movements. Results from two user studies (N=37) show that Pursuits with text is feasible and can achieve similar accuracy as non text-based pursuit approaches. While calibration is less accurate, it integrates smoothly with reading and allows areas of the display the user is looking at to be identified.
@InProceedings{khamis2016ubicomp,
  author    = {Khamis, Mohamed and Saltuk, Ozan and Hang, Alina and Stolz, Katharina and Bulling, Andreas and Alt, Florian},
  title     = {{TextPursuits}: Using Text for {Pursuits}-based Interaction and Calibration on Public Displays},
  booktitle = {Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing},
  year      = {2016},
  series    = {UbiComp '16},
  pages     = {274--285},
  address   = {New York, NY, USA},
  publisher = {ACM},
  note      = {khamis2016ubicomp},
  abstract  = {In this paper we show how reading text on large display can be used to enable gaze interaction in public space. Our research is motivated by the fact that much of the content on public displays includes text. Hence, researchers and practitioners could greatly benefit from users being able to spontaneously interact as well as to implicitly calibrate an eye tracker while simply reading this text. In particular, we adapt Pursuits, a technique that correlates users' eye movements with moving on-screen targets. While prior work used abstract objects or dots as targets, we explore the use of Pursuits with text (read-and-pursue). Thereby we address the challenge that eye movements performed while reading interfere with the pursuit movements. Results from two user studies (N=37) show that Pursuits with text is feasible and can achieve similar accuracy as non text-based pursuit approaches. While calibration is less accurate, it integrates smoothly with reading and allows areas of the display the user is looking at to be identified.},
  acmid     = {2971679},
  doi       = {10.1145/2971648.2971679},
  isbn      = {978-1-4503-4461-6},
  keywords  = {gaze interaction, public displays, smooth pursuit, text},
  location  = {Heidelberg, Germany},
  numpages  = {12},
  timestamp = {2016.09.18},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2016ubicomp.pdf},
}
M. Khamis, F. Alt, and A. Bulling. Challenges and design space of gaze-enabled public displays. In Proceedings of the 2016 acm international joint conference on pervasive and ubiquitous computing (PETMEI ’16), ACM, New York, NY, USA, 2016. doi:10.1145/2968219.2968342
[BibTeX] [Abstract] [PDF]
Gaze is an attractive modality for public displays, hence the recent years saw an increase in deployments of gaze-enabled public displays. Although gaze has been thoroughly investigated for desktop scenarios, gaze-enabled public displays present new challenges that are unique to this setup. In contrast to desktop settings, public displays (1) cannot afford requiring eye tracker calibration, (2) expect users to interact from different positions, and (3) expect multiple users to interact simultaneously. In this work we discuss these challenges, and explore the design space of gaze-enabled public displays. We conclude by discussing how the current state of research stands with respect to the identified challenges, and highlight directions for future work.
@inproceedings{khamis2016petmei,
  author    = {Khamis, Mohamed and Alt, Florian and Bulling, Andreas},
  title     = {Challenges and Design Space of Gaze-enabled Public Displays},
  booktitle = {Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing},
  series    = {PETMEI '16},
  location  = {Heidelberg, Germany},
  numpages  = {10},
  year      = {2016},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/2968219.2968342},
  abstract  = {Gaze is an attractive modality for public displays, hence the recent years saw an increase in deployments of gaze-enabled public displays. Although gaze has been thoroughly investigated for desktop scenarios, gaze-enabled public displays present new challenges that are unique to this setup. In contrast to desktop settings, public displays (1) cannot afford requiring eye tracker calibration, (2) expect users to interact from different positions, and (3) expect multiple users to interact simultaneously. In this work we discuss these challenges, and explore the design space of gaze-enabled public displays. We conclude by discussing how the current state of research stands wrt.~the identified challenges, and highlight directions for future work.},
  note      = {khamis2016petmei},
  timestamp = {2016.09.15},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2016petmei.pdf},
}
F. Alt and J. Vehns. Opportunistic deployments: challenges and opportunities of conducting public display research at an airport. In Proceedings of the 5th acm international symposium on pervasive displays (PerDis ’16), ACM, New York, NY, USA, 2016, p. 106–117. doi:10.1145/2914920.2915020
[BibTeX] [Abstract] [PDF]
In this paper, we report on the design, development, and deployment of an interactive shopping display at a major European airport. The ability to manufacture displays in arbitrary size and form factors as well as their networking capabilities allow public displays to be deployed in almost any location and target a huge variety of audiences. At the same time, this makes it difficult for researchers to gather generalizable insights on audience behavior. Rather, findings are often very specific to a particular deployment. We argue that in order to develop a comprehensive understanding of how successful interactive display installations can be created, researchers need to explore an as large variety of situations as possible. We contribute to this understanding by providing insights from a deployment in a security critical environment and involving multiple stakeholders where the audience is encountered in different situations (waiting, passing-by). Our insights are valuable for both researchers and practitioners, operating interactive display deployments.
@inproceedings{alt2016perdis2,
  author    = {Alt, Florian and Vehns, Julia},
  title     = {Opportunistic Deployments: Challenges and Opportunities of Conducting Public Display Research at an Airport},
  booktitle = {Proceedings of the 5th ACM International Symposium on Pervasive Displays},
  series    = {PerDis '16},
  location  = {Oulu, Finland},
  pages     = {106--117},
  numpages  = {12},
  year      = {2016},
  publisher = {ACM},
  address   = {New York, NY, USA},
  isbn      = {978-1-4503-4366-4},
  acmid     = {2915020},
  doi       = {10.1145/2914920.2915020},
  keywords  = {audience behavior, deployment-based research, interaction, public displays, shopping},
  abstract  = {In this paper, we report on the design, development, and deployment of an interactive shopping display at a major European airport. The ability to manufacture displays in arbitrary size and form factors as well as their networking capabilities allow public displays to be deployed in almost any location and target a huge variety of audiences. At the same time, this makes it difficult for researchers to gather generalizable insights on audience behavior. Rather, findings are often very specific to a particular deployment. We argue that in order to develop a comprehensive understanding of how successful interactive display installations can be created, researchers need to explore an as large variety of situations as possible. We contribute to this understanding by providing insights from a deployment in a security critical environment and involving multiple stakeholders where the audience is encountered in different situations (waiting, passing-by). Our insights are valuable for both researchers and practitioners, operating interactive display deployments.},
  note      = {alt2016perdis2},
  timestamp = {2016.06.20},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/alt2016perdis2.pdf},
}
S. Schneegass, S. Ogando, and F. Alt. Using on-body displays for extending the output of wearable devices. In Proceedings of the 5th acm international symposium on pervasive displays (PerDis ’16), ACM, New York, NY, USA, 2016, p. 67–74. doi:10.1145/2914920.2915021
[BibTeX] [Abstract] [PDF]
In this work, we explore wearable on-body displays. These displays have the potential of extending the display space of smart watches to the user’s body. Our research is motivated by wearable computing devices moving technology closer to the human. Today, smart watches offer functionalities similar to smart phones, yet at a smaller form factor. To cope with the limited display real-estate we propose to use on-body displays integrated with clothing to extend the available display space. We present a design space for on-body displays and explore users’ location and visualization preferences. We also report on the design and implementation of a prototypical display system. We evaluated the prototype in a lab study with 16 participants, showing that on-body displays perform similar to current off-screen visualization techniques.
@inproceedings{schneegass2016perdis,
  author    = {Schneegass, Stefan and Ogando, Sophie and Alt, Florian},
  title     = {Using On-body Displays for Extending the Output of Wearable Devices},
  booktitle = {Proceedings of the 5th ACM International Symposium on Pervasive Displays},
  series    = {PerDis '16},
  location  = {Oulu, Finland},
  pages     = {67--74},
  numpages  = {8},
  year      = {2016},
  publisher = {ACM},
  address   = {New York, NY, USA},
  isbn      = {978-1-4503-4366-4},
  acmid     = {2915021},
  doi       = {10.1145/2914920.2915021},
  keywords  = {focus + context, on-body display, smart textiles, wearable computing},
  abstract  = {In this work, we explore wearable on-body displays. These displays have the potential of extending the display space of smart watches to the user's body. Our research is motivated by wearable computing devices moving technology closer to the human. Today, smart watches offer functionalities similar to smart phones, yet at a smaller form factor. To cope with the limited display real-estate we propose to use on-body displays integrated with clothing to extend the available display space. We present a design space for on-body displays and explore users' location and visualization preferences. We also report on the design and implementation of a prototypical display system. We evaluated the prototype in a lab study with 16 participants, showing that on-body displays perform similar to current off-screen visualization techniques.},
  note      = {schneegass2016perdis},
  timestamp = {2016.06.20},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/schneegass2016perdis.pdf},
}
M. Baldauf, F. Adegeye, F. Alt, and J. Harms. Your browser is the controller: advanced web-based smartphone remote controls for public screens. In Proceedings of the 5th acm international symposium on pervasive displays (PerDis ’16), ACM, New York, NY, USA, 2016, p. 175–181. doi:10.1145/2914920.2915026
[BibTeX] [Abstract] [PDF]
In recent years, a lot of research focused on using smartphones as input devices for distant screens, in many cases by means of native applications. At the same time, prior work often ignored the downsides of native applications for practical usage, such as the need for download and the required installation process. This hampers the spontaneous use of an interactive service. To address the aforementioned drawbacks, we introduce ATREUS, an open-source framework which enables creating and provisioning manifold mobile remote controls as plain web applications. We describe the basic architecture of ATREUS and present four functional remote controls realized using the framework. Two sophisticated controls, the Mini Video and the Smart Lens approach, have been previously implemented as native applications only. Furthermore, we report on lessons learned for realizing web-based remote controls during functional tests and finally present the results of an informal user study.
@inproceedings{baldauf2016perdis,
  author    = {Baldauf, Matthias and Adegeye, Florence and Alt, Florian and Harms, Johannes},
  title     = {Your Browser is the Controller: Advanced Web-based Smartphone Remote Controls for Public Screens},
  booktitle = {Proceedings of the 5th ACM International Symposium on Pervasive Displays},
  series    = {PerDis '16},
  location  = {Oulu, Finland},
  pages     = {175--181},
  numpages  = {7},
  year      = {2016},
  publisher = {ACM},
  address   = {New York, NY, USA},
  isbn      = {978-1-4503-4366-4},
  acmid     = {2915026},
  doi       = {10.1145/2914920.2915026},
  keywords  = {interaction, public display, remote control, smartphone},
  abstract  = {In recent years, a lot of research focused on using smartphones as input devices for distant screens, in many cases by means of native applications. At the same time, prior work often ignored the downsides of native applications for practical usage, such as the need for download and the required installation process. This hampers the spontaneous use of an interactive service. To address the aforementioned drawbacks, we introduce ATREUS, an open-source framework which enables creating and provisioning manifold mobile remote controls as plain web applications. We describe the basic architecture of ATREUS and present four functional remote controls realized using the framework. Two sophisticated controls, the Mini Video and the Smart Lens approach, have been previously implemented as native applications only. Furthermore, we report on lessons learned for realizing web-based remote controls during functional tests and finally present the results of an informal user study.},
  note      = {baldauf2016perdis},
  timestamp = {2016.06.20},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/baldauf2016perdis.pdf},
}
F. Alt, S. Torma, and D. Buschek. Don’t disturb me: understanding secondary tasks on public displays. In Proceedings of the 5th acm international symposium on pervasive displays (PerDis ’16), ACM, New York, NY, USA, 2016, p. 1–12. doi:10.1145/2914920.2915023
[BibTeX] [Abstract] [PDF]
@inproceedings{alt2016perdis1,
  author    = {Alt, Florian and Torma, Sarah and Buschek, Daniel},
  title     = {Don't Disturb Me: Understanding Secondary Tasks on Public Displays},
  booktitle = {Proceedings of the 5th ACM International Symposium on Pervasive Displays},
  series    = {PerDis '16},
  location  = {Oulu, Finland},
  pages     = {1--12},
  numpages  = {12},
  year      = {2016},
  publisher = {ACM},
  address   = {New York, NY, USA},
  isbn      = {978-1-4503-4366-4},
  acmid     = {2915023},
  doi       = {10.1145/2914920.2915023},
  note      = {alt2016perdis1},
  timestamp = {2016.06.20},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/alt2016perdis1.pdf},
}
F. Alt, A. Bulling, L. Mecke, and D. Buschek. Attention, please!: comparing features for measuring audience attention towards pervasive displays. In Proceedings of the 2016 acm conference on designing interactive systems (DIS ’16), ACM, New York, NY, USA, 2016, p. 823–828. doi:10.1145/2901790.2901897
[BibTeX] [Abstract] [PDF]
Measuring audience attention towards pervasive displays is important but accurate measurement in real time remains a significant sensing challenge. Consequently, researchers and practitioners typically use other features, such as face presence, as a proxy. We provide a principled comparison of the performance of six features and their combinations for measuring attention: face presence, movement trajectory, walking speed, shoulder orientation, head pose, and gaze direction. We implemented a prototype that is capable of capturing this rich set of features from video and depth camera data. Using a controlled lab experiment (N=18) we show that as a single feature, face presence is indeed among the most accurate. We further show that accuracy can be increased through a combination of features (+10.3%), knowledge about the audience (+63.8%), as well as user identities (+69.0%). Our findings are valuable for display providers who want to collect data on display effectiveness or build interactive, responsive apps.
@inproceedings{alt2016dis,
  author    = {Alt, Florian and Bulling, Andreas and Mecke, Lukas and Buschek, Daniel},
  title     = {Attention, Please!: Comparing Features for Measuring Audience Attention Towards Pervasive Displays},
  booktitle = {Proceedings of the 2016 ACM Conference on Designing Interactive Systems},
  series    = {DIS '16},
  location  = {Brisbane, QLD, Australia},
  pages     = {823--828},
  numpages  = {6},
  year      = {2016},
  publisher = {ACM},
  address   = {New York, NY, USA},
  isbn      = {978-1-4503-4031-1},
  acmid     = {2901897},
  doi       = {10.1145/2901790.2901897},
  keywords  = {audience funnel, interaction, phases, public displays, zones},
  abstract  = {Measuring audience attention towards pervasive displays is important but accurate measurement in real time remains a significant sensing challenge. Consequently, researchers and practitioners typically use other features, such as face presence, as a proxy. We provide a principled comparison of the performance of six features and their combinations for measuring attention: face presence, movement trajectory, walking speed, shoulder orientation, head pose, and gaze direction. We implemented a prototype that is capable of capturing this rich set of features from video and depth camera data. Using a controlled lab experiment (N=18) we show that as a single feature, face presence is indeed among the most accurate. We further show that accuracy can be increased through a combination of features (+10.3%), knowledge about the audience (+63.8%), as well as user identities (+69.0%). Our findings are valuable for display providers who want to collect data on display effectiveness or build interactive, responsive apps.},
  note      = {alt2016dis},
  timestamp = {2016.06.01},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/alt2016dis.pdf},
}
D. Buschek, A. De Luca, and F. Alt. Evaluating the influence of targets and hand postures on touch-based behavioural biometrics. In Proceedings of the 2016 chi conference on human factors in computing systems (CHI ’16), ACM, New York, NY, USA, 2016, p. 1349–1361. doi:10.1145/2858036.2858165
[BibTeX] [Abstract] [PDF]
Users’ individual differences in their mobile touch behaviour can help to continuously verify identity and protect personal data. However, little is known about the influence of GUI elements and hand postures on such touch biometrics. Thus, we present a metric to measure the amount of user-revealing information that can be extracted from touch targeting interactions and apply it in eight targeting tasks with over 150,000 touches from 24 users in two sessions. We compare touch-to-target offset patterns for four target types and two hand postures. Our analyses reveal that small, compactly shaped targets near screen edges yield the most descriptive touch targeting patterns. Moreover, our results show that thumb touches are more individual than index finger ones. We conclude that touch-based user identification systems should analyse GUI layouts and infer hand postures. We also describe a framework to estimate the usefulness of GUIs for touch biometrics.
@inproceedings{buschek2016chi2,
  author    = {Buschek, Daniel and De Luca, Alexander and Alt, Florian},
  title     = {Evaluating the Influence of Targets and Hand Postures on Touch-based Behavioural Biometrics},
  booktitle = {Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems},
  series    = {CHI '16},
  location  = {San Jose, California, USA},
  pages     = {1349--1361},
  numpages  = {13},
  year      = {2016},
  publisher = {ACM},
  address   = {New York, NY, USA},
  isbn      = {978-1-4503-3362-7},
  acmid     = {2858165},
  doi       = {10.1145/2858036.2858165},
  keywords  = {behavioural biometrics, mobile device, touch targeting},
  abstract  = {Users' individual differences in their mobile touch behaviour can help to continuously verify identity and protect personal data. However, little is known about the influence of GUI elements and hand postures on such touch biometrics. Thus, we present a metric to measure the amount of user-revealing information that can be extracted from touch targeting interactions and apply it in eight targeting tasks with over 150,000 touches from 24 users in two sessions. We compare touch-to-target offset patterns for four target types and two hand postures. Our analyses reveal that small, compactly shaped targets near screen edges yield the most descriptive touch targeting patterns. Moreover, our results show that thumb touches are more individual than index finger ones. We conclude that touch-based user identification systems should analyse GUI layouts and infer hand postures. We also describe a framework to estimate the usefulness of GUIs for touch biometrics.},
  note      = {buschek2016chi2},
  timestamp = {2016.05.13},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2016chi2.pdf},
}
D. Buschek, F. Hartmann, E. von Zezschwitz, A. De Luca, and F. Alt. SnapApp: reducing authentication overhead with a time-constrained fast unlock option. In Proceedings of the 2016 chi conference on human factors in computing systems (CHI ’16), ACM, New York, NY, USA, 2016, p. 3736–3747. doi:10.1145/2858036.2858164
[BibTeX] [Abstract] [PDF]
We present SnapApp, a novel unlock concept for mobile devices that reduces authentication overhead with a time-constrained quick-access option. SnapApp provides two unlock methods at once: While PIN entry enables full access to the device, users can also bypass authentication with a short sliding gesture (“Snap”). This grants access for a limited amount of time (e.g. 30 seconds). The device then automatically locks itself upon expiration. Our concept further explores limiting the possible number of Snaps in a row, and configuring blacklists for app use during short access (e.g. to exclude banking apps). We discuss opportunities and challenges of this concept based on a 30-day field study with 18 participants, including data logging and experience sampling methods. Snaps significantly reduced unlock times, and our app was perceived to offer a good tradeoff. Conceptual challenges include, for example, supporting users in configuring their blacklists.
@InProceedings{buschek2016chi1,
  author    = {Buschek, Daniel and Hartmann, Fabian and von Zezschwitz, Emanuel and De Luca, Alexander and Alt, Florian},
  title     = {{SnapApp}: Reducing Authentication Overhead with a Time-Constrained Fast Unlock Option},
  booktitle = {Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems},
  year      = {2016},
  series    = {CHI '16},
  pages     = {3736--3747},
  address   = {New York, NY, USA},
  publisher = {ACM},
  note      = {buschek2016chi1},
  abstract  = {We present SnapApp, a novel unlock concept for mobile devices that reduces authentication overhead with a time-constrained quick-access option. SnapApp provides two unlock methods at once: While PIN entry enables full access to the device, users can also bypass authentication with a short sliding gesture ("Snap"). This grants access for a limited amount of time (e.g. 30 seconds). The device then automatically locks itself upon expiration. Our concept further explores limiting the possible number of Snaps in a row, and configuring blacklists for app use during short access (e.g. to exclude banking apps). We discuss opportunities and challenges of this concept based on a 30-day field study with 18 participants, including data logging and experience sampling methods. Snaps significantly reduced unlock times, and our app was perceived to offer a good tradeoff. Conceptual challenges include, for example, supporting users in configuring their blacklists.},
  acmid     = {2858164},
  doi       = {10.1145/2858036.2858164},
  isbn      = {978-1-4503-3362-7},
  keywords  = {smartphone authentication, time-constrained device access, usable privacy and security},
  location  = {San Jose, California, USA},
  numpages  = {12},
  timestamp = {2016.05.13},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2016chi1.pdf},
}
R. Haeuslschmid, B. Pfleging, and F. Alt. A design space to support the development of windshield applications for the car. In Proceedings of the 2016 chi conference on human factors in computing systems (CHI ’16), ACM, New York, NY, USA, 2016, p. 5076–5091. doi:10.1145/2858036.2858336
[BibTeX] [Abstract] [PDF]
In this paper we present a design space for interactive windshield displays in vehicles and discuss how this design space can support designers in creating windshield applications for drivers, passengers, and pedestrians. Our work is motivated by numerous examples in other HCI-related areas where seminal design space papers served as a valuable basis to evolve the respective field – most notably mobile devices, automotive user interfaces, and interactive public displays. The presented design space is based on a comprehensive literature review. Furthermore we present a classification of 211 windshield applications, derived from a survey of research projects and commercial products as well as from focus groups. We showcase the utility of our work for designers of windshield applications through two scenarios. Overall, our design space can help building applications for diverse use cases. This includes apps inside and outside the car as well as applications for specific areas (fire fighters, police, ambulance).
@inproceedings{haeuslschmid2016chi,
  author    = {Haeuslschmid, Renate and Pfleging, Bastian and Alt, Florian},
  title     = {A Design Space to Support the Development of Windshield Applications for the Car},
  booktitle = {Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems},
  series    = {CHI '16},
  location  = {San Jose, California, USA},
  pages     = {5076--5091},
  numpages  = {16},
  year      = {2016},
  publisher = {ACM},
  address   = {New York, NY, USA},
  isbn      = {978-1-4503-3362-7},
  acmid     = {2858336},
  doi       = {10.1145/2858036.2858336},
  keywords  = {automotive interfaces, design space, head-up display, in-vehicle interfaces, windshield display},
  abstract  = {In this paper we present a design space for interactive windshield displays in vehicles and discuss how this design space can support designers in creating windshield applications for drivers, passengers, and pedestrians. Our work is motivated by numerous examples in other HCI-related areas where seminal design space papers served as a valuable basis to evolve the respective field -- most notably mobile devices, automotive user interfaces, and interactive public displays. The presented design space is based on a comprehensive literature review. Furthermore we present a classification of 211 windshield applications, derived from a survey of research projects and commercial products as well as from focus groups. We showcase the utility of our work for designers of windshield applications through two scenarios. Overall, our design space can help building applications for diverse use cases. This includes apps inside and outside the car as well as applications for specific areas (fire fighters, police, ambulance).},
  note      = {haeuslschmid2016chi},
  timestamp = {2016.05.13},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/haeuslschmid2016chi.pdf},
}
H. Schneider, K. Moser, A. Butz, and F. Alt. Understanding the mechanics of persuasive system design: a mixed-method theory-driven analysis of freeletics. In Proceedings of the 2016 chi conference on human factors in computing systems (CHI ’16), ACM, New York, NY, USA, 2016, p. 309–320. doi:10.1145/2858036.2858290
[BibTeX] [Abstract] [PDF]
While we know that persuasive system design matters, we barely understand when persuasive strategies work and why they only work in some cases. We propose an approach to systematically understand and design for motivation, by studying the fundamental building blocks of motivation, according to the theory of planned behavior (TPB): attitude, subjective norm, and perceived control. We quantitatively analyzed (N=643) the attitudes, beliefs, and values of mobile fitness coach users with TPB. Capacity (i.e., perceived ability to exercise) had the biggest effect on users’ motivation. Using individual differences theory, we identified three distinct user groups, namely followers, hedonists, and achievers. With insights from semi-structured interviews (N=5) we derive design implications finding that transformation videos that feature other users’ success stories as well as suggesting an appropriate workout can have positive effects on perceived capacity. Practitioners and researchers can use our theory-based mixed-method research design to better understand user behavior in persuasive applications.
@InProceedings{schneider2016chi,
  author    = {Schneider, Hanna and Moser, Kilian and Butz, Andreas and Alt, Florian},
  title     = {Understanding the Mechanics of Persuasive System Design: A Mixed-Method Theory-driven Analysis of {Freeletics}},
  booktitle = {Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems},
  year      = {2016},
  series    = {CHI '16},
  pages     = {309--320},
  address   = {New York, NY, USA},
  publisher = {ACM},
  note      = {schneider2016chi},
  abstract  = {While we know that persuasive system design matters, we barely understand when persuasive strategies work and why they only work in some cases. We propose an approach to systematically understand and design for motivation, by studying the fundamental building blocks of motivation, according to the theory of planned behavior (TPB): attitude, subjective norm, and perceived control. We quantitatively analyzed (N=643) the attitudes, beliefs, and values of mobile fitness coach users with TPB. Capacity (i.e., perceived ability to exercise) had the biggest effect on users' motivation. Using individual differences theory, we identified three distinct user groups, namely followers, hedonists, and achievers. With insights from semi-structured interviews (N=5) we derive design implications finding that transformation videos that feature other users' success stories as well as suggesting an appropriate workout can have positive effects on perceived capacity. Practitioners and researchers can use our theory-based mixed-method research design to better understand user behavior in persuasive applications.},
  acmid     = {2858290},
  doi       = {10.1145/2858036.2858290},
  isbn      = {978-1-4503-3362-7},
  keywords  = {behavior change, fitness application, personal values, persuasive technology, theory of planned behavior},
  location  = {San Jose, California, USA},
  numpages  = {12},
  timestamp = {2016.05.12},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/schneider2016chi.pdf},
}
M. Khamis, F. Alt, M. Hassib, E. von Zezschwitz, R. Hasholzner, and A. Bulling. GazeTouchPass: multimodal authentication using gaze and touch on mobile devices. In Proceedings of the 2016 chi conference extended abstracts on human factors in computing systems (CHI EA ’16), ACM, New York, NY, USA, 2016, p. 2156–2164. doi:10.1145/2851581.2892314
[BibTeX] [Abstract] [PDF]
We propose a multimodal scheme, GazeTouchPass, that combines gaze and touch for shoulder-surfing resistant user authentication on mobile devices. GazeTouchPass allows passwords with multiple switches between input modalities during authentication. This requires attackers to simultaneously observe the device screen and the user’s eyes to find the password. We evaluate the security and usability of GazeTouchPass in two user studies. Our findings show that GazeTouchPass is usable and significantly more secure than single-modal authentication against basic and even advanced shoulder-surfing attacks.
@inproceedings{khamis2016chiea,
  author    = {Khamis, Mohamed and Alt, Florian and Hassib, Mariam and von Zezschwitz, Emanuel and Hasholzner, Regina and Bulling, Andreas},
  title     = {GazeTouchPass: Multimodal Authentication Using Gaze and Touch on Mobile Devices},
  booktitle = {Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems},
  series    = {CHI EA '16},
  year      = {2016},
  pages     = {2156--2164},
  numpages  = {9},
  location  = {San Jose, California, USA},
  address   = {New York, NY, USA},
  publisher = {ACM},
  isbn      = {978-1-4503-4082-3},
  doi       = {10.1145/2851581.2892314},
  acmid     = {2892314},
  keywords  = {gaze gestures, mobile devices, multimodal authentication},
  abstract  = {We propose a multimodal scheme, GazeTouchPass, that combines gaze and touch for shoulder-surfing resistant user authentication on mobile devices. GazeTouchPass allows passwords with multiple switches between input modalities during authentication. This requires attackers to simultaneously observe the device screen and the user's eyes to find the password. We evaluate the security and usability of GazeTouchPass in two user studies. Our findings show that GazeTouchPass is usable and significantly more secure than single-modal authentication against basic and even advanced shoulder-surfing attacks.},
  note      = {khamis2016chiea},
  timestamp = {2016.05.10},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2016chiea.pdf},
}
M. Hassib, M. Khamis, S. Schneegass, A. S. Shirazi, and F. Alt. Investigating user needs for bio-sensing and affective wearables. In Proceedings of the 2016 chi conference extended abstracts on human factors in computing systems (CHI EA ’16), ACM, New York, NY, USA, 2016, p. 1415–1422. doi:10.1145/2851581.2892480
[BibTeX] [Abstract] [PDF]
Bio-sensing wearables are currently advancing to provide users with a lot of information about their physiological and affective states. However, relatively little is known about users’ interest in acquiring, sharing and receiving this information and through which channels and modalities. To close this gap, we report on the results of an online survey (N=109) exploring principle aspects of the design space of wearables such as data types, contexts, feedback modalities and sharing behaviors. Results show that users are interested in obtaining physiological, emotional and cognitive data through modalities beyond traditional touchscreen output. Valence of the information, whether positive or negative affects the sharing behaviors.
@inproceedings{hassib2016chiea,
  author    = {Hassib, Mariam and Khamis, Mohamed and Schneegass, Stefan and Shirazi, Ali Sahami and Alt, Florian},
  title     = {Investigating User Needs for Bio-sensing and Affective Wearables},
  booktitle = {Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems},
  series    = {CHI EA '16},
  year      = {2016},
  pages     = {1415--1422},
  numpages  = {8},
  location  = {San Jose, California, USA},
  address   = {New York, NY, USA},
  publisher = {ACM},
  isbn      = {978-1-4503-4082-3},
  doi       = {10.1145/2851581.2892480},
  acmid     = {2892480},
  keywords  = {cognition, emotion, physiological sensing, wearables},
  abstract  = {Bio-sensing wearables are currently advancing to provide users with a lot of information about their physiological and affective states. However, relatively little is known about users' interest in acquiring, sharing and receiving this information and through which channels and modalities. To close this gap, we report on the results of an online survey (N=109) exploring principle aspects of the design space of wearables such as data types, contexts, feedback modalities and sharing behaviors. Results show that users are interested in obtaining physiological, emotional and cognitive data through modalities beyond traditional touchscreen output. Valence of the information, whether positive or negative affects the sharing behaviors.},
  note      = {hassib2016chiea},
  timestamp = {2016.05.10},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/hassib2016chiea.pdf},
}
J. Shi, D. Buschek, and F. Alt. Investigating the impact of feedback on gaming performance on motivation to interact with public displays. In Proceedings of the 2016 chi conference extended abstracts on human factors in computing systems (CHI EA ’16), ACM, New York, NY, USA, 2016, p. 1344–1351. doi:10.1145/2851581.2892465
[BibTeX] [Abstract] [PDF]
This paper investigates the influence of feedback about users’ performance on their motivation as they interact with games on displays in public space. Our research is motivated by the fact that games are popular among both researchers and practitioners, due to their ability to attract many users. However, it is widely unclear, which factors impact on how much people play and whether they leave personal information on the display. We investigate different forms of feedback (highscore, real-time score and real-time rank during gameplay) and report on how they influence the behavior of users. Our results are based on data from the deployment of an interactive game in a public space.
@inproceedings{shi2016chiea,
  author    = {Shi, Jiamin and Buschek, Daniel and Alt, Florian},
  title     = {Investigating the Impact of Feedback on Gaming Performance on Motivation to Interact with Public Displays},
  booktitle = {Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems},
  series    = {CHI EA '16},
  year      = {2016},
  pages     = {1344--1351},
  numpages  = {8},
  location  = {San Jose, California, USA},
  address   = {New York, NY, USA},
  publisher = {ACM},
  isbn      = {978-1-4503-4082-3},
  doi       = {10.1145/2851581.2892465},
  acmid     = {2892465},
  keywords  = {competition, motivation, public displays, user performance},
  abstract  = {This paper investigates the influence of feedback about users' performance on their motivation as they interact with games on displays in public space. Our research is motivated by the fact that games are popular among both researchers and practitioners, due to their ability to attract many users. However, it is widely unclear, which factors impact on how much people play and whether they leave personal information on the display. We investigate different forms of feedback (highscore, real-time score and real-time rank during gameplay) and report on how they influence the behavior of users. Our results are based on data from the deployment of an interactive game in a public space.},
  note      = {shi2016chiea},
  timestamp = {2016.05.10},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/shi2016chiea.pdf},
}
J. Shi and F. Alt. The anonymous audience analyzer: visualizing audience behavior in public space. In Proceedings of the 2016 chi conference extended abstracts on human factors in computing systems (CHI EA ’16), ACM, New York, NY, USA, 2016, p. 3766–3769. doi:10.1145/2851581.2890256
[BibTeX] [Abstract] [PDF]
With dropping hardware prices, an increasing number of interactive displays is being deployed in public space. To investigate and understand the impact of novel interaction techniques, content, and display properties, researchers and practitioners alike rely on observations of the audience. While in-situ observations are costly in terms of time and effort, video data allows situations in front of the display to be analyzed post-hoc. In many situations, however, video recordings are not possible since the privacy of users needs to be protected. To address this challenge, we present a tool that allows scenes in front of a display to be reconstructed from Kinect data (user position and body posture) and visualized in a virtual environment. In this way, the privacy of the audience can be preserved while allowing display owners to run in-depth investigations of their display installations.
@inproceedings{shi2016chidemo,
  author    = {Shi, Jiamin and Alt, Florian},
  title     = {The Anonymous Audience Analyzer: Visualizing Audience Behavior in Public Space},
  booktitle = {Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems},
  series    = {CHI EA '16},
  year      = {2016},
  pages     = {3766--3769},
  numpages  = {4},
  location  = {San Jose, California, USA},
  address   = {New York, NY, USA},
  publisher = {ACM},
  isbn      = {978-1-4503-4082-3},
  doi       = {10.1145/2851581.2890256},
  acmid     = {2890256},
  keywords  = {audience behaviour, public displays, virtual reality},
  abstract  = {With dropping hardware prices, an increasing number of interactive displays is being deployed in public space. To investigate and understand the impact of novel interaction techniques, content, and display properties, researchers and practitioners alike rely on observations of the audience. While in-situ observations are costly in terms of time and effort, video data allows situations in front of the display to be analyzed post-hoc. In many situations, however, video recordings are not possible since the privacy of users needs to be protected. To address this challenge, we present a tool that allows scenes in front of a display to be reconstructed from Kinect data (user position and body posture) and visualized in a virtual environment. In this way, the privacy of the audience can be preserved while allowing display owners to run in-depth investigations of their display installations.},
  note      = {shi2016chidemo},
  timestamp = {2016.04.23},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/shi2016chidemo.pdf},
}

### 2015

F. Alt, A. Bulling, G. Gravanis, and D. Buschek. Gravityspot: guiding users in front of public displays using on-screen visual cues. In Proceedings of the 28th annual acm symposium on user interface software & technology (UIST ’15), ACM, New York, NY, USA, 2015, p. 47–56. doi:10.1145/2807442.2807490
[BibTeX] [Abstract] [PDF]
Users tend to position themselves in front of interactive public displays in such a way as to best perceive its content. Currently, this sweet spot is implicitly defined by display properties, content, the input modality, as well as space constraints in front of the display. We present GravitySpot – an approach that makes sweet spots flexible by actively guiding users to arbitrary target positions in front of displays using visual cues. Such guidance is beneficial, for example, if a particular input technology only works at a specific distance or if users should be guided towards a non-crowded area of a large display. In two controlled lab studies (n=29) we evaluate different visual cues based on color, shape, and motion, as well as position-to-cue mapping functions. We show that both the visual cues and mapping functions allow for fine-grained control over positioning speed and accuracy. Findings are complemented by observations from a 3-month real-world deployment.
@inproceedings{alt2015uist,
  author    = {Alt, Florian and Bulling, Andreas and Gravanis, Gino and Buschek, Daniel},
  title     = {GravitySpot: Guiding Users in Front of Public Displays Using On-Screen Visual Cues},
  booktitle = {Proceedings of the 28th Annual ACM Symposium on User Interface Software \& Technology},
  series    = {UIST '15},
  year      = {2015},
  pages     = {47--56},
  numpages  = {10},
  location  = {Charlotte, NC, USA},
  address   = {New York, NY, USA},
  publisher = {ACM},
  isbn      = {978-1-4503-3779-3},
  doi       = {10.1145/2807442.2807490},
  acmid     = {2807490},
  keywords  = {audience behavior, interaction, public displays, sweet spot},
  abstract  = {Users tend to position themselves in front of interactive public displays in such a way as to best perceive its content. Currently, this sweet spot is implicitly defined by display properties, content, the input modality, as well as space constraints in front of the display. We present GravitySpot -- an approach that makes sweet spots flexible by actively guiding users to arbitrary target positions in front of displays using visual cues. Such guidance is beneficial, for example, if a particular input technology only works at a specific distance or if users should be guided towards a non-crowded area of a large display. In two controlled lab studies (n=29) we evaluate different visual cues based on color, shape, and motion, as well as position-to-cue mapping functions. We show that both the visual cues and mapping functions allow for fine-grained control over positioning speed and accuracy. Findings are complemented by observations from a 3-month real-world deployment.},
  note      = {alt2015uist},
  timestamp = {2015.11.18},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/alt2015uist.pdf},
}
P. Panhey, T. Döring, S. Schneegass, D. Wenig, and F. Alt. What people really remember: understanding cognitive effects when interacting with large displays. In Proceedings of the 2015 international conference on interactive tabletops & surfaces (ITS ’15), ACM, New York, NY, USA, 2015, p. 103–106. doi:10.1145/2817721.2817732
[BibTeX] [Abstract] [PDF]
This paper investigates how common interaction techniques for large displays impact on recall in learning tasks. Our work is motivated by results of prior research in different areas that attribute a positive effect of interactivity to cognition. We present findings from a controlled lab experiment with 32 participants comparing mobile phone-based interaction, touch interaction and full-body interaction to a non-interactive baseline. In contrast to prior findings, our results reveal that more movement can negatively influence recall. In particular we show that designers are facing an immanent trade-off between designing engaging interaction through extensive movement and creating memorable content.
@inproceedings{panhey2015its,
  author    = {Panhey, Philipp and D{\"o}ring, Tanja and Schneegass, Stefan and Wenig, Dirk and Alt, Florian},
  title     = {What People Really Remember: Understanding Cognitive Effects When Interacting with Large Displays},
  booktitle = {Proceedings of the 2015 International Conference on Interactive Tabletops \& Surfaces},
  series    = {ITS '15},
  year      = {2015},
  pages     = {103--106},
  numpages  = {4},
  address   = {New York, NY, USA},
  publisher = {ACM},
  isbn      = {978-1-4503-3899-8},
  doi       = {10.1145/2817721.2817732},
  acmid     = {2817732},
  keywords  = {cognition, interactivity, pervasive displays, recall},
  abstract  = {This paper investigates how common interaction techniques for large displays impact on recall in learning tasks. Our work is motivated by results of prior research in different areas that attribute a positive effect of interactivity to cognition. We present findings from a controlled lab experiment with 32 participants comparing mobile phone-based interaction, touch interaction and full-body interaction to a non-interactive baseline. In contrast to prior findings, our results reveal that more movement can negatively influence recall. In particular we show that designers are facing an immanent trade-off between designing engaging interaction through extensive movement and creating memorable content.},
  note      = {panhey2015its},
  timestamp = {2015.11.15},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/panhey2015its.pdf},
}
A. Fedosov, E. Niforatos, F. Alt, and I. Elhart. Supporting interactivity on a ski lift. In Adjunct proceedings of the 2015 acm international joint conference on pervasive and ubiquitous computing and proceedings of the 2015 acm international symposium on wearable computers (UbiComp/ISWC’15 Adjunct), Association for Computing Machinery, New York, NY, USA, 2015, p. 767–770. doi:10.1145/2800835.2807952
[BibTeX] [PDF]
@inproceedings{fedosov2015ubicompadj,
  author    = {Fedosov, Anton and Niforatos, Evangelos and Alt, Florian and Elhart, Ivan},
  title     = {Supporting Interactivity on a Ski Lift},
  booktitle = {Adjunct Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing and Proceedings of the 2015 ACM International Symposium on Wearable Computers},
  year      = {2015},
  pages     = {767--770},
  numpages  = {4},
  location  = {Osaka, Japan},
  address   = {New York, NY, USA},
  publisher = {Association for Computing Machinery},
  isbn      = {9781450335751},
  doi       = {10.1145/2800835.2807952},
  keywords  = {skiing, interaction, public displays, outdoor sports},
  note      = {fedosov2015ubicompadj},
  timestamp = {2015.09.15},
}
N. Broy, M. Nefzger, F. Alt, M. Hassib, and A. Schmidt. 3D-HUDD – Developing a Prototyping Tool for 3D Head-Up Displays. In Proceedings of the 15th IFIP TC13 International Conference on Human-Computer Interaction (INTERACT ’15), ACM, New York, NY, USA, 2015.
[BibTeX] [Abstract] [PDF]
The ability of head-up displays (HUDs) to present information within the usual viewpoint of the user has led to a quick adoption in domains where attention is crucial, such as in the car. As HUDs employ 3D technology, further opportunities emerge: information can be structured and positioned in 3D space thus allowing important information to be perceived more easily and information can be registered with objects in the visual scene to communicate a relationship. This allows novel user interfaces to be built. As of today, however, no prototyping tools exist, that allow 3D UIs for HUDs to be sketched and tested prior to development. To close this gap, we report on the design and development of the 3D Head-Up Display Designer (3D-HUDD). In addition, we present an evaluation of the tool with 24 participants, comparing different input modalities and depth management modes.
@inproceedings{broy2015interact,
  author    = {Broy, Nora and Nefzger, Matthias and Alt, Florian and Hassib, Mariam and Schmidt, Albrecht},
  title     = {{3D-HUDD - Developing a Prototyping Tool for 3D Head-Up Displays}},
  booktitle = {{Proceedings of the 15th IFIP TC13 International Conference on Human-Computer Interaction}},
  series    = {INTERACT '15},
  year      = {2015},
  numpages  = {6},
  location  = {Bamberg, Germany},
  address   = {New York, NY, USA},
  publisher = {ACM},
  abstract  = {The ability of head-up displays (HUDs) to present information within the usual viewpoint of the user has led to a quick adoption in domains where attention is crucial, such as in the car. As HUDs employ 3D technology, further opportunities emerge: information can be structured and positioned in 3D space thus allowing important information to be perceived more easily and information can be registered with objects in the visual scene to communicate a relationship. This allows novel user interfaces to be built. As of today, however, no prototyping tools exist, that allow 3D UIs for HUDs to be sketched and tested prior to development. To close this gap, we report on the design and development of the 3D Head-Up Display Designer (3D-HUDD). In addition, we present an evaluation of the tool with 24 participants, comparing different input modalities and depth management modes.},
  note      = {broy2015interact},
  owner     = {florianalt},
  timestamp = {2015.09.14},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/broy2015interact.pdf},
}
N. Broy, M. Guo, S. Schneegass, B. Pfleging, and F. Alt. Introducing novel technologies in the car: conducting a real-world study to test 3d dashboards. In Proceedings of the 7th international conference on automotive user interfaces and interactive vehicular applications (AutomotiveUI ’15), ACM, New York, NY, USA, 2015, p. 179–186. doi:10.1145/2799250.2799280
[BibTeX] [Abstract] [PDF]
Today, the vast majority of research on novel automotive user interface technologies is conducted in the lab, often using driving simulation. While such studies are important in early stages of the design process, we argue that ultimately studies need to be conducted in the real-world in order to investigate all aspects crucial for adoption of novel user interface technologies in commercial vehicles. In this paper, we present a case study that investigates introducing autostereoscopic 3D dashboards into cars. We report on studying this novel technology in the real world, validating and extending findings of prior simulator studies. Furthermore, we provide guidelines for practitioners and researchers to design and conduct real-world studies that minimize the risk for participants while at the same time yielding ecologically valid findings.
@inproceedings{broy2015autoui,
  author    = {Broy, Nora and Guo, Mengbing and Schneegass, Stefan and Pfleging, Bastian and Alt, Florian},
  title     = {Introducing Novel Technologies in the Car: Conducting a Real-world Study to Test 3D Dashboards},
  booktitle = {Proceedings of the 7th International Conference on Automotive User Interfaces and Interactive Vehicular Applications},
  series    = {AutomotiveUI '15},
  year      = {2015},
  pages     = {179--186},
  numpages  = {8},
  location  = {Nottingham, United Kingdom},
  address   = {New York, NY, USA},
  publisher = {ACM},
  isbn      = {978-1-4503-3736-6},
  doi       = {10.1145/2799250.2799280},
  acmid     = {2799280},
  keywords  = {automotive UIs, real world study, stereoscopic 3D},
  abstract  = {Today, the vast majority of research on novel automotive user interface technologies is conducted in the lab, often using driving simulation. While such studies are important in early stages of the design process, we argue that ultimately studies need to be conducted in the real-world in order to investigate all aspects crucial for adoption of novel user interface technologies in commercial vehicles. In this paper, we present a case study that investigates introducing autostereoscopic 3D dashboards into cars. We report on studying this novel technology in the real world, validating and extending findings of prior simulator studies. Furthermore, we provide guidelines for practitioners and researchers to design and conduct real-world studies that minimize the risk for participants while at the same time yielding ecologically valid findings.},
  note      = {broy2015autoui},
  timestamp = {2015.09.14},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/broy2015autoui.pdf},
}
M. Khamis, F. Alt, and A. Bulling. A field study on spontaneous gaze-based interaction with a public display using pursuits. In Adjunct proceedings of the 2015 acm international joint conference on pervasive and ubiquitous computing and proceedings of the 2015 acm international symposium on wearable computers (UbiComp/ISWC’15 Adjunct), ACM, New York, NY, USA, 2015, p. 863–872. doi:10.1145/2800835.2804335
[BibTeX] [Abstract] [PDF]
Smooth pursuit eye movements were recently introduced as a promising technique for calibration-free and thus spontaneous and natural gaze interaction. While pursuits have been evaluated in controlled laboratory studies, the technique has not yet been evaluated with respect to usability in the wild. We report on a field study in which we deployed a game on a public display where participants used pursuits to select fish moving in linear and circular trajectories at different speeds. The study ran for two days in a busy computer lab resulting in a total of 56 interactions. Results from our study show that linear trajectories are statistically faster to select via pursuits than circular trajectories. We also found that pursuits is well perceived by users who find it fast and responsive.
@inproceedings{khamis2015petmei,
  author    = {Khamis, Mohamed and Alt, Florian and Bulling, Andreas},
  title     = {A Field Study on Spontaneous Gaze-based Interaction with a Public Display Using Pursuits},
  booktitle = {Adjunct Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing and Proceedings of the 2015 ACM International Symposium on Wearable Computers},
  year      = {2015},
  pages     = {863--872},
  numpages  = {10},
  location  = {Osaka, Japan},
  address   = {New York, NY, USA},
  publisher = {ACM},
  isbn      = {978-1-4503-3575-1},
  doi       = {10.1145/2800835.2804335},
  acmid     = {2804335},
  keywords  = {field study, pervasive displays, public displays, pursuits, smooth pursuit eye movement},
  abstract  = {Smooth pursuit eye movements were recently introduced as a promising technique for calibration-free and thus spontaneous and natural gaze interaction. While pursuits have been evaluated in controlled laboratory studies, the technique has not yet been evaluated with respect to usability in the wild. We report on a field study in which we deployed a game on a public display where participants used pursuits to select fish moving in linear and circular trajectories at different speeds. The study ran for two days in a busy computer lab resulting in a total of 56 interactions. Results from our study show that linear trajectories are statistically faster to select via pursuits than circular trajectories. We also found that pursuits is well perceived by users who find it fast and responsive.},
  note      = {khamis2015petmei},
  timestamp = {2015.09.08},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2015petmei.pdf},
}
M. Khamis, A. Bulling, and F. Alt. Tackling challenges of interactive public displays using gaze. In Adjunct proceedings of the 2015 acm international joint conference on pervasive and ubiquitous computing and proceedings of the 2015 acm international symposium on wearable computers (UbiComp/ISWC’15 Adjunct), ACM, New York, NY, USA, 2015, p. 763–766. doi:10.1145/2800835.2807951
[BibTeX] [Abstract] [PDF]
Falling hardware prices led to a widespread use of public displays. Common interaction techniques for such displays currently include touch, mid-air, or smartphone-based interaction. While these techniques are well understood from a technical perspective, several remaining challenges hinder the uptake of interactive displays among passersby. In this paper we propose addressing major public display challenges through gaze as a novel interaction modality. We discuss why gaze-based interaction can tackle these challenges effectively and discuss how solutions can be technically realized. Furthermore, we summarize state-of-the-art eye tracking techniques that show particular promise in the area of public displays.
@inproceedings{khamis2015pdapps,
  author    = {Khamis, Mohamed and Bulling, Andreas and Alt, Florian},
  title     = {Tackling Challenges of Interactive Public Displays Using Gaze},
  booktitle = {Adjunct Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing and Proceedings of the 2015 ACM International Symposium on Wearable Computers},
  year      = {2015},
  pages     = {763--766},
  numpages  = {4},
  location  = {Osaka, Japan},
  address   = {New York, NY, USA},
  publisher = {ACM},
  isbn      = {978-1-4503-3575-1},
  doi       = {10.1145/2800835.2807951},
  acmid     = {2807951},
  keywords  = {digital signage, gaze, gaze-based interaction, pervasive displays, public displays},
  abstract  = {Falling hardware prices led to a widespread use of public displays. Common interaction techniques for such displays currently include touch, mid-air, or smartphone-based interaction. While these techniques are well understood from a technical perspective, several remaining challenges hinder the uptake of interactive displays among passersby. In this paper we propose addressing major public display challenges through gaze as a novel interaction modality. We discuss why gaze-based interaction can tackle these challenges effectively and discuss how solutions can be technically realized. Furthermore, we summarize state-of-the-art eye tracking techniques that show particular promise in the area of public displays.},
  note      = {khamis2015pdapps},
  timestamp = {2015.09.08},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2015pdapps.pdf},
}
D. Buschek, I. Just, B. Fritzsche, and F. Alt. Make Me Laugh: A Recommendation System for Humoristic Content on the World Wide Web. In Proceedings of Mensch und Computer 2015, 2015.
[BibTeX] [Abstract] [PDF]
Humoristic content is an inherent part of the World Wide Web and increasingly consumed for micro-entertainment. However, humor is often highly individual and depends on background knowledge and context. This paper presents an approach to recommend humoristic content fitting each individual user’s taste and interests. In a field study with 150 participants over four weeks, users rated content with a 0-10 scale on a humor website. Based on this data, we train and apply a Collaborative Filtering (CF) algorithm to assess individual humor and recommend fitting content. Our study shows that users rate recommended content 22.6% higher than randomly chosen content.
@inproceedings{buschek2015muc,
  author    = {Buschek, Daniel and Just, Ingo and Fritzsche, Benjamin and Alt, Florian},
  title     = {{Make Me Laugh: A Recommendation System for Humoristic Content on the World Wide Web}},
  booktitle = {{Proceedings of Mensch und Computer 2015}},
  year      = {2015},
  numpages  = {10},
  location  = {Stuttgart, Germany},
  abstract  = {Humoristic content is an inherent part of the World Wide Web and increasingly consumed for micro-entertainment. However, humor is often highly individual and depends on background knowledge and context. This paper presents an approach to recommend humoristic content fitting each individual user's taste and interests. In a field study with 150 participants over four weeks, users rated content with a 0-10 scale on a humor website. Based on this data, we train and apply a Collaborative Filtering (CF) algorithm to assess individual humor and recommend fitting content. Our study shows that users rate recommended content 22.6\% higher than randomly chosen content.},
  note      = {buschek2015muc},
  owner     = {florian},
  timestamp = {2015.09.06},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2015muc.pdf},
}
F. Alt, S. Schneegass, A. S. Shirazi, M. Hassib, and A. Bulling. Graphical passwords in the wild: understanding how users choose pictures and passwords in image-based authentication schemes. In Proceedings of the 17th international conference on human-computer interaction with mobile devices and services (MobileHCI ’15), ACM, New York, NY, USA, 2015, p. 316–322. doi:10.1145/2785830.2785882
[BibTeX] [Abstract] [PDF]
Common user authentication methods on smartphones, such as lock patterns, PINs, or passwords, impose a trade-off between security and password memorability. Image-based passwords were proposed as a secure and usable alternative. As of today, however, it remains unclear how such schemes are used in the wild. We present the first study to investigate how image-based passwords are used over long periods of time in the real world. Our analyses are based on data from 2318 unique devices collected over more than one year using a custom application released in the Android Play store. We present an in-depth analysis of what kind of images users select, how they define their passwords, and how secure these passwords are. Our findings provide valuable insights into real-world use of image-based passwords and inform the design of future graphical authentication schemes.
@inproceedings{alt2015mobilehci,
  author    = {Alt, Florian and Schneegass, Stefan and Shirazi, Alireza Sahami and Hassib, Mariam and Bulling, Andreas},
  title     = {Graphical Passwords in the Wild: Understanding How Users Choose Pictures and Passwords in Image-based Authentication Schemes},
  booktitle = {Proceedings of the 17th International Conference on Human-Computer Interaction with Mobile Devices and Services},
  series    = {MobileHCI '15},
  year      = {2015},
  pages     = {316--322},
  numpages  = {7},
  location  = {Copenhagen, Denmark},
  address   = {New York, NY, USA},
  publisher = {ACM},
  doi       = {10.1145/2785830.2785882},
  isbn      = {978-1-4503-3652-9},
  acmid     = {2785882},
  keywords  = {Graphical passwords, images, security},
  abstract  = {Common user authentication methods on smartphones, such as lock patterns, PINs, or passwords, impose a trade-off between security and password memorability. Image-based passwords were proposed as a secure and usable alternative. As of today, however, it remains unclear how such schemes are used in the wild. We present the first study to investigate how image-based passwords are used over long periods of time in the real world. Our analyses are based on data from 2318 unique devices collected over more than one year using a custom application released in the Android Play store. We present an in-depth analysis of what kind of images users select, how they define their passwords, and how secure these passwords are. Our findings provide valuable insights into real-world use of image-based passwords and inform the design of future graphical authentication schemes.},
  note      = {alt2015mobilehci},
  timestamp = {2015.08.24},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/alt2015mobilehci.pdf},
}
D. Buschek, A. De Luca, and F. Alt. There is more to typing than speed: expressive mobile touch keyboards via dynamic font personalisation. In Proceedings of the 17th international conference on human-computer interaction with mobile devices and services (MobileHCI ’15), ACM, New York, NY, USA, 2015, p. 125–130. doi:10.1145/2785830.2785844
[BibTeX] [Abstract] [PDF]
Typing is a common task on mobile devices and has been widely addressed in HCI research, mostly regarding quantitative factors such as error rates and speed. Qualitative aspects, like personal expressiveness, have received less attention. This paper makes individual typing behaviour visible to the users to render mobile typing more personal and expressive in varying contexts: We introduce a dynamic font personalisation framework, TapScript, which adapts a finger-drawn font according to user behaviour and context, such as finger placement, device orientation and movements – resulting in a handwritten-looking font. We implemented TapScript for evaluation with an online survey (N=91) and a field study with a chat app (N=11). Looking at resulting fonts, survey participants distinguished pairs of typists with 84.5% accuracy and walking/sitting with 94.8%. Study participants perceived fonts as individual and the chat experience as personal. They also made creative explicit use of font adaptations.
@inproceedings{buschek2015mobilehci,
  author    = {Buschek, Daniel and De Luca, Alexander and Alt, Florian},
  title     = {There is More to Typing Than Speed: Expressive Mobile Touch Keyboards via Dynamic Font Personalisation},
  booktitle = {Proceedings of the 17th International Conference on Human-Computer Interaction with Mobile Devices and Services},
  series    = {MobileHCI '15},
  year      = {2015},
  pages     = {125--130},
  numpages  = {6},
  location  = {Copenhagen, Denmark},
  address   = {New York, NY, USA},
  publisher = {ACM},
  doi       = {10.1145/2785830.2785844},
  isbn      = {978-1-4503-3652-9},
  acmid     = {2785844},
  keywords  = {Font Personalisation, Mobile, Touch Typing},
  abstract  = {Typing is a common task on mobile devices and has been widely addressed in HCI research, mostly regarding quantitative factors such as error rates and speed. Qualitative aspects, like personal expressiveness, have received less attention. This paper makes individual typing behaviour visible to the users to render mobile typing more personal and expressive in varying contexts: We introduce a dynamic font personalisation framework, TapScript, which adapts a finger-drawn font according to user behaviour and context, such as finger placement, device orientation and movements - resulting in a handwritten-looking font. We implemented TapScript for evaluation with an online survey (N=91) and a field study with a chat app (N=11). Looking at resulting fonts, survey participants distinguished pairs of typists with 84.5% accuracy and walking/sitting with 94.8%. Study participants perceived fonts as individual and the chat experience as personal. They also made creative explicit use of font adaptations.},
  note      = {buschek2015mobilehci},
  timestamp = {2015.08.23},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2015mobilehci.pdf},
}
D. Buschek, A. Auch, and F. Alt. A toolkit for analysis and prediction of touch targeting behaviour on mobile websites. In Proceedings of the 7th acm sigchi symposium on engineering interactive computing systems (EICS ’15), ACM, New York, NY, USA, 2015, p. 54–63. doi:10.1145/2774225.2774851
[BibTeX] [Abstract] [PDF]
Touch interaction on mobile devices suffers from several problems, such as the thumb’s limited reach or the occlusion of targets by the finger. This leads to offsets between the user’s intended touch location and the actual location sensed by the device. Recent research has modelled such offset patterns to analyse and predict touch targeting behaviour. However, these models have only been applied in lab experiments for specific tasks (typing, pointing, targeting games). In contrast, their applications to websites are yet unexplored. To close this gap, this paper explores the potential of touch modelling for the mobile web: We present a toolkit which allows web developers to collect and analyse touch interactions with their websites. Our system can learn about users’ targeting patterns to simulate expected touch interactions and help identify potential usability issues for future versions of the website prior to deployment. We train models on data collected in a field experiment with 50 participants in a shopping scenario. Our analyses show that the resulting models capture interesting behavioural patterns, reveal insights into user-specific behaviour, and enable predictions of expected error rates for individual interface elements.
@inproceedings{buschek2015eics,
  author    = {Buschek, Daniel and Auch, Alexander and Alt, Florian},
  title     = {A Toolkit for Analysis and Prediction of Touch Targeting Behaviour on Mobile Websites},
  booktitle = {Proceedings of the 7th ACM SIGCHI Symposium on Engineering Interactive Computing Systems},
  series    = {EICS '15},
  year      = {2015},
  pages     = {54--63},
  numpages  = {10},
  location  = {Duisburg, Germany},
  address   = {New York, NY, USA},
  publisher = {ACM},
  doi       = {10.1145/2774225.2774851},
  isbn      = {978-1-4503-3646-8},
  acmid     = {2774851},
  keywords  = {mobile, targeting, toolkit, touch, user model, web},
  abstract  = {Touch interaction on mobile devices suffers from several problems, such as the thumb's limited reach or the occlusion of targets by the finger. This leads to offsets between the user's intended touch location and the actual location sensed by the device. Recent research has modelled such offset patterns to analyse and predict touch targeting behaviour. However, these models have only been applied in lab experiments for specific tasks (typing, pointing, targeting games). In contrast, their applications to websites are yet unexplored. To close this gap, this paper explores the potential of touch modelling for the mobile web: We present a toolkit which allows web developers to collect and analyse touch interactions with their websites. Our system can learn about users' targeting patterns to simulate expected touch interactions and help identify potential usability issues for future versions of the website prior to deployment. We train models on data collected in a field experiment with 50 participants in a shopping scenario. Our analyses show that the resulting models capture interesting behavioural patterns, reveal insights into user-specific behaviour, and enable predictions of expected error rates for individual interface elements.},
  note      = {buschek2015eics},
  timestamp = {2015.06.23},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2015eics.pdf},
}
T. Dingler, M. Funk, and F. Alt. Interaction proxemics: combining physical spaces for seamless gesture interaction. In Proceedings of the 4th international symposium on pervasive displays (PerDis ’15), ACM, New York, NY, USA, 2015, p. 107–114. doi:10.1145/2757710.2757722
[BibTeX] [Abstract] [PDF]
Touch and gesture input have become popular for display interaction. While applications usually focus on one particular input technology, we set out to adjust the interaction modality based on the proximity of users to the screen. Therefore, we built a system which combines technology-transparent interaction spaces across 4 interaction zones: touch, fine-grained, general, and coarse gestures. In a user study, participants performed a pointing task within and across these zones. Results show that zone transitions are most feasible up to 2m from the screen. Hence, applications can map functionality across different interaction zones, thereby providing additional interaction dimensions and decreasing the complexity of the gesture set. We collected subjective feedback and present a user-defined gesture set for performing a series of standard tasks across different interaction zones. Seamless transition between these spaces is essential to create a consistent interaction experience; finally, we discuss characteristics of systems that take into account user proxemics as input modality.
@inproceedings{dingler2015perdis,
  author    = {Dingler, Tilman and Funk, Markus and Alt, Florian},
  title     = {Interaction Proxemics: Combining Physical Spaces for Seamless Gesture Interaction},
  booktitle = {Proceedings of the 4th International Symposium on Pervasive Displays},
  series    = {PerDis '15},
  year      = {2015},
  pages     = {107--114},
  numpages  = {8},
  location  = {Saarbruecken, Germany},
  address   = {New York, NY, USA},
  publisher = {ACM},
  doi       = {10.1145/2757710.2757722},
  isbn      = {978-1-4503-3608-6},
  acmid     = {2757722},
  keywords  = {Interaction, distance, gestures, proxemics},
  abstract  = {Touch and gesture input have become popular for display interaction. While applications usually focus on one particular input technology, we set out to adjust the interaction modality based on the proximity of users to the screen. Therefore, we built a system which combines technology-transparent interaction spaces across 4 interaction zones: touch, fine-grained, general, and coarse gestures. In a user study, participants performed a pointing task within and across these zones. Results show that zone transitions are most feasible up to 2m from the screen. Hence, applications can map functionality across different interaction zones, thereby providing additional interaction dimensions and decreasing the complexity of the gesture set. We collected subjective feedback and present a user-defined gesture set for performing a series of standard tasks across different interaction zones. Seamless transition between these spaces is essential to create a consistent interaction experience; finally, we discuss characteristics of systems that take into account user proxemics as input modality.},
  note      = {dingler2015perdis},
  timestamp = {2015.06.08},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/dingler2015perdis.pdf},
}
A. Colley, L. Ventä-Olkkonen, F. Alt, and J. Häkkilä. Insights from deploying see-through augmented reality signage in the wild. In Proceedings of the 4th international symposium on pervasive displays (PerDis ’15), ACM, New York, NY, USA, 2015, p. 179–185. doi:10.1145/2757710.2757730
[BibTeX] [Abstract] [PDF]
Typically the key challenges with interactive digital signage are (1) interaction times are short (usually in the order of seconds), (2) interaction needs to be very easy to understand, and (3) interaction needs to provide a benefit that justifies the effort to engage. To tackle these challenges, we propose a see-through augmented reality application for digital signage that enables passersby to observe the area behind the display, augmented with useful data. We report on the development and deployment of our application in two public settings: a public library and a supermarket. Based on observations of 261 (library) and 661 (supermarket) passersby and 14 interviews, we provide early insights and implications for application designers. Our results show a significant increase in attention: the see-through signage was noticed by 46% of the people, compared to 14% with the non-see through version. Furthermore, findings indicate that to best benefit the passersby, the AR displays should clearly communicate their purpose.
@InProceedings{colley2015perdis,
author = {Colley, Ashley and Vent{\"a}-Olkkonen, Leena and Alt, Florian and H{\"a}kkil{\"a}, Jonna},
title = {Insights from Deploying See-Through Augmented Reality Signage in the Wild},
booktitle = {Proceedings of the 4th International Symposium on Pervasive Displays},
year = {2015},
series = {PerDis '15},
pages = {179--185},
address = {New York, NY, USA},
publisher = {ACM},
note = {colley2015perdis},
abstract = {Typically the key challenges with interactive digital signage are (1) interaction times are short (usually in the order of seconds), (2) interaction needs to be very easy to understand, and (3) interaction needs to provide a benefit that justifies the effort to engage. To tackle these challenges, we propose a see-through augmented reality application for digital signage that enables passersby to observe the area behind the display, augmented with useful data. We report on the development and deployment of our application in two public settings: a public library and a supermarket. Based on observations of 261 (library) and 661 (supermarket) passersby and 14 interviews, we provide early insights and implications for application designers. Our results show a significant increase in attention: the see-through signage was noticed by 46% of the people, compared to 14% with the non-see through version. Furthermore, findings indicate that to best benefit the passersby, the AR displays should clearly communicate their purpose.},
acmid = {2757730},
doi = {10.1145/2757710.2757730},
isbn = {978-1-4503-3608-6},
keywords = {AR, attention, digital signage, interaction, public displays},
location = {Saarbruecken, Germany},
numpages = {7},
timestamp = {2015.06.08},
url = {http://www.florian-alt.org/unibw/wp-content/publications/colley2015perdis.pdf},
}
N. Memarovic, S. Clinch, and F. Alt. Understanding display blindness in future display deployments. In Proceedings of the 4th international symposium on pervasive displays (PerDis ’15), ACM, New York, NY, USA, 2015, p. 7–14. doi:10.1145/2757710.2757719
[BibTeX] [Abstract] [PDF]
Digital displays are heralded as a transformative medium for communication. However, a known challenge in the domain is that of display blindness in which passersby pay little or no attention to public displays. This phenomenon has been a major motivation for much of the research on public displays. However, since the early observations, little has been done to develop our understanding of display blindness – for example, to identify determining factors or propose appropriate metrics. Hence, the degree to which developments in signage form, content, and interaction address display blindness remains unclear. In this paper we examine and categorize current approaches to studying and addressing display blindness. Based on our analysis we identify open questions in the research space, including the impact of display physicality and audience differences, relationships with other observed effects, the impact of research interventions, and selection of appropriate metrics. The goal of this paper is to start a discussion within the community on the topic, and to inform the design of future research.
@inproceedings{memarovic2015perdis,
  author    = {Memarovic, Nemanja and Clinch, Sarah and Alt, Florian},
  title     = {Understanding Display Blindness in Future Display Deployments},
  booktitle = {Proceedings of the 4th International Symposium on Pervasive Displays},
  series    = {PerDis '15},
  year      = {2015},
  pages     = {7--14},
  numpages  = {8},
  location  = {Saarbruecken, Germany},
  address   = {New York, NY, USA},
  publisher = {ACM},
  doi       = {10.1145/2757710.2757719},
  isbn      = {978-1-4503-3608-6},
  acmid     = {2757719},
  keywords  = {Display blindness, interaction blindness, public displays},
  abstract  = {Digital displays are heralded as a transformative medium for communication. However, a known challenge in the domain is that of display blindness in which passersby pay little or no attention to public displays. This phenomenon has been a major motivation for much of the research on public displays. However, since the early observations, little has been done to develop our understanding of display blindness -- for example, to identify determining factors or propose appropriate metrics. Hence, the degree to which developments in signage form, content, and interaction address display blindness remains unclear. In this paper we examine and categorize current approaches to studying and addressing display blindness. Based on our analysis we identify open questions in the research space, including the impact of display physicality and audience differences, relationships with other observed effects, the impact of research interventions, and selection of appropriate metrics. The goal of this paper is to start a discussion within the community on the topic, and to inform the design of future research.},
  note      = {memarovic2015perdis},
  timestamp = {2015.06.08},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/memarovic2015perdis.pdf},
}
D. Buschek, A. De Luca, and F. Alt. Improving accuracy, applicability and usability of keystroke biometrics on mobile touchscreen devices. In Proceedings of the 33rd annual acm conference on human factors in computing systems (CHI ’15), ACM, New York, NY, USA, 2015, p. 1393–1402. doi:10.1145/2702123.2702252
[BibTeX] [Abstract] [PDF]
Authentication methods can be improved by considering implicit, individual behavioural cues. In particular, verifying users based on typing behaviour has been widely studied with physical keyboards. On mobile touchscreens, the same concepts have been applied with little adaptations so far. This paper presents the first reported study on mobile keystroke biometrics which compares touch-specific features between three different hand postures and evaluation schemes. Based on 20.160 password entries from a study with 28 participants over two weeks, we show that including spatial touch features reduces implicit authentication equal error rates (EER) by 26.4 – 36.8% relative to the previously used temporal features. We also show that authentication works better for some hand postures than others. To improve applicability and usability, we further quantify the influence of common evaluation assumptions: known attacker data, training and testing on data from a single typing session, and fixed hand postures. We show that these practices can lead to overly optimistic evaluations. In consequence, we describe evaluation recommendations, a probabilistic framework to handle unknown hand postures, and ideas for further improvements.
@inproceedings{buschek2015chi,
  author    = {Buschek, Daniel and De Luca, Alexander and Alt, Florian},
  title     = {Improving Accuracy, Applicability and Usability of Keystroke Biometrics on Mobile Touchscreen Devices},
  booktitle = {Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems},
  series    = {CHI '15},
  year      = {2015},
  pages     = {1393--1402},
  numpages  = {10},
  location  = {Seoul, Republic of Korea},
  address   = {New York, NY, USA},
  publisher = {ACM},
  doi       = {10.1145/2702123.2702252},
  isbn      = {978-1-4503-3145-6},
  acmid     = {2702252},
  keywords  = {biometrics, keystroke dynamics, mobile, touch},
  abstract  = {Authentication methods can be improved by considering implicit, individual behavioural cues. In particular, verifying users based on typing behaviour has been widely studied with physical keyboards. On mobile touchscreens, the same concepts have been applied with little adaptations so far. This paper presents the first reported study on mobile keystroke biometrics which compares touch-specific features between three different hand postures and evaluation schemes. Based on 20.160 password entries from a study with 28 participants over two weeks, we show that including spatial touch features reduces implicit authentication equal error rates (EER) by 26.4 - 36.8% relative to the previously used temporal features. We also show that authentication works better for some hand postures than others. To improve applicability and usability, we further quantify the influence of common evaluation assumptions: known attacker data, training and testing on data from a single typing session, and fixed hand postures. We show that these practices can lead to overly optimistic evaluations. In consequence, we describe evaluation recommendations, a probabilistic framework to handle unknown hand postures, and ideas for further improvements.},
  note      = {buschek2015chi},
  timestamp = {2015.04.28},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2015chi.pdf},
}
M. Pfeiffer, T. Dünte, S. Schneegass, F. Alt, and M. Rohs. Cruise control for pedestrians: controlling walking direction using electrical muscle stimulation. In Proceedings of the 33rd annual acm conference on human factors in computing systems (CHI ’15), ACM, New York, NY, USA, 2015, p. 2505–2514. doi:10.1145/2702123.2702190
[BibTeX] [Abstract] [PDF]
Pedestrian navigation systems require users to perceive, interpret, and react to navigation information. This can tax cognition as navigation information competes with information from the real world. We propose actuated navigation, a new kind of pedestrian navigation in which the user does not need to attend to the navigation task at all. An actuation signal is directly sent to the human motor system to influence walking direction. To achieve this goal we stimulate the sartorius muscle using electrical muscle stimulation. The rotation occurs during the swing phase of the leg and can easily be counteracted. The user therefore stays in control. We discuss the properties of actuated navigation and present a lab study on identifying basic parameters of the technique as well as an outdoor study in a park. The results show that our approach changes a user’s walking direction by about 16°/m on average and that the system can successfully steer users in a park with crowded areas, distractions, obstacles, and uneven ground.
@InProceedings{pfeiffer2015chi,
author = {Pfeiffer, Max and D{\"u}nte, Tim and Schneegass, Stefan and Alt, Florian and Rohs, Michael},
title = {Cruise Control for Pedestrians: Controlling Walking Direction Using Electrical Muscle Stimulation},
booktitle = {Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems},
year = {2015},
series = {CHI '15},
pages = {2505--2514},
address = {New York, NY, USA},
publisher = {ACM},
note = {pfeiffer2015chi},
abstract = {Pedestrian navigation systems require users to perceive, interpret, and react to navigation information. This can tax cognition as navigation information competes with information from the real world. We propose actuated navigation, a new kind of pedestrian navigation in which the user does not need to attend to the navigation task at all. An actuation signal is directly sent to the human motor system to influence walking direction. To achieve this goal we stimulate the sartorius muscle using electrical muscle stimulation. The rotation occurs during the swing phase of the leg and can easily be counteracted. The user therefore stays in control. We discuss the properties of actuated navigation and present a lab study on identifying basic parameters of the technique as well as an outdoor study in a park. The results show that our approach changes a user's walking direction by about 16{\textdegree}/m on average and that the system can successfully steer users in a park with crowded areas, distractions, obstacles, and uneven ground.},
acmid = {2702190},
doi = {10.1145/2702123.2702190},
isbn = {978-1-4503-3145-6},
keywords = {actuated navigation, electrical muscle stimulation, haptic feedback, pedestrian navigation, wearable devices},
location = {Seoul, Republic of Korea},
numpages = {10},
timestamp = {2015.04.28},
url = {http://www.florian-alt.org/unibw/wp-content/publications/pfeiffer2015chi.pdf},
}
D. Buschek, M. Spitzer, and F. Alt. Video-recording your life: user perception and experiences. In Proceedings of the 33rd annual acm conference extended abstracts on human factors in computing systems (CHI EA ’15), ACM, New York, NY, USA, 2015, p. 2223–2228. doi:10.1145/2702613.2732743
[BibTeX] [Abstract] [PDF]
Video recording is becoming an integral part of our daily activities: Action cams and wearable cameras allow us to capture scenes of our daily life effortlessly. This trend generates vast amounts of video material impossible to review manually. However, these recordings also contain a lot of information potentially interesting to the recording individual and to others. Such videos can provide a meaningful summary of the day, serving as a digital extension to the user’s human memory. They might also be interesting to others as tutorials (e.g. how to change a flat tyre). As a first step towards this vision, we present a survey assessing the users’ view and their video recording behavior. Findings were used to inform the design of a prototype based on off-the-shelf components, which allows users to create meaningful video clips of their daily activities in an automated manner by using their phone and any wearable camera. We conclude with a preliminary, qualitative study showing the feasibility and potential of the approach and sketch future research directions.
@inproceedings{buschek2015chiea,
  author    = {Buschek, Daniel and Spitzer, Michael and Alt, Florian},
  title     = {Video-Recording Your Life: User Perception and Experiences},
  booktitle = {Proceedings of the 33rd Annual ACM Conference Extended Abstracts on Human Factors in Computing Systems},
  series    = {CHI EA '15},
  year      = {2015},
  pages     = {2223--2228},
  numpages  = {6},
  location  = {Seoul, Republic of Korea},
  address   = {New York, NY, USA},
  publisher = {ACM},
  doi       = {10.1145/2702613.2732743},
  isbn      = {978-1-4503-3146-3},
  acmid     = {2732743},
  keywords  = {context, life logging, smartphone, video recording},
  abstract  = {Video recording is becoming an integral part of our daily activities: Action cams and wearable cameras allow us to capture scenes of our daily life effortlessly. This trend generates vast amounts of video material impossible to review manually. However, these recordings also contain a lot of information potentially interesting to the recording individual and to others. Such videos can provide a meaningful summary of the day, serving as a digital extension to the user's human memory. They might also be interesting to others as tutorials (e.g. how to change a flat tyre). As a first step towards this vision, we present a survey assessing the users' view and their video recording behavior. Findings were used to inform the design of a prototype based on off-the-shelf components, which allows users to create meaningful video clips of their daily activities in an automated manner by using their phone and any wearable camera. We conclude with a preliminary, qualitative study showing the feasibility and potential of the approach and sketch future research directions.},
  note      = {buschek2015chiea},
  timestamp = {2015.04.27},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2015chiea.pdf},
}
N. Broy, S. Schneegass, M. Guo, F. Alt, and A. Schmidt. Evaluating stereoscopic 3d for automotive user interfaces in a real-world driving study. In Proceedings of the 33rd annual acm conference extended abstracts on human factors in computing systems (CHI EA ’15), ACM, New York, NY, USA, 2015, p. 1717–1722. doi:10.1145/2702613.2732902
[BibTeX] [Abstract] [PDF]
This paper reports on the use of in-car 3D displays in a real-world driving scenario. Today, stereoscopic displays are becoming ubiquitous in many domains such as mobile phones or TVs. Instead of using 3D for entertainment, we explore the 3D effect as a mean to spatially structure user interface (UI) elements. To evaluate potentials and drawbacks of in-car 3D displays we mounted an autostereoscopic display as instrument cluster in a vehicle and conducted a real-world driving study with 15 experts in automotive UI design. The results show that the 3D effect increases the perceived quality of the UI and enhances the presentation of spatial information (e.g., navigation cues) compared to 2D. However, the effect should be used well-considered to avoid spatial clutter which can increase the system’s complexity.
@InProceedings{broy2015chiea,
author = {Broy, Nora and Schneegass, Stefan and Guo, Mengbing and Alt, Florian and Schmidt, Albrecht},
title = {Evaluating Stereoscopic {3D} for Automotive User Interfaces in a Real-World Driving Study},
booktitle = {Proceedings of the 33rd Annual ACM Conference Extended Abstracts on Human Factors in Computing Systems},
year = {2015},
series = {CHI EA '15},
pages = {1717--1722},
address = {New York, NY, USA},
publisher = {ACM},
note = {broy2015chiea},
abstract = {This paper reports on the use of in-car 3D displays in a real-world driving scenario. Today, stereoscopic displays are becoming ubiquitous in many domains such as mobile phones or TVs. Instead of using 3D for entertainment, we explore the 3D effect as a mean to spatially structure user interface (UI) elements. To evaluate potentials and drawbacks of in-car 3D displays we mounted an autostereoscopic display as instrument cluster in a vehicle and conducted a real-world driving study with 15 experts in automotive UI design. The results show that the 3D effect increases the perceived quality of the UI and enhances the presentation of spatial information (e.g., navigation cues) compared to 2D. However, the effect should be used well-considered to avoid spatial clutter which can increase the system's complexity.},
acmid = {2732902},
doi = {10.1145/2702613.2732902},
isbn = {978-1-4503-3146-3},
keywords = {automotive user interfaces, stereoscopic 3D},
location = {Seoul, Republic of Korea},
numpages = {6},
timestamp = {2015.04.27},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2015chiea.pdf},
}
D. Buschek and F. Alt. Touchml: a machine learning toolkit for modelling spatial touch targeting behaviour. In Proceedings of the 20th international conference on intelligent user interfaces (IUI ’15), ACM, New York, NY, USA, 2015, p. 110–114. doi:10.1145/2678025.2701381
[BibTeX] [Abstract] [PDF]
Pointing tasks are commonly studied in HCI research, for example to evaluate and compare different interaction techniques or devices. A recent line of work has modelled user-specific touch behaviour with machine learning methods to reveal spatial targeting error patterns across the screen. These models can also be applied to improve accuracy of touchscreens and keyboards, and to recognise users and hand postures. However, no implementation of these techniques has been made publicly available yet, hindering broader use in research and practical deployments. Therefore, this paper presents a toolkit which implements such touch models for data analysis (Python), mobile applications (Java/Android), and the web (JavaScript). We demonstrate several applications, including hand posture recognition, on touch targeting data collected in a study with 24 participants. We consider different target types and hand postures, changing behaviour over time, and the influence of hand sizes.
@InProceedings{buschek2015iui,
author = {Buschek, Daniel and Alt, Florian},
title = {{TouchML}: A Machine Learning Toolkit for Modelling Spatial Touch Targeting Behaviour},
booktitle = {Proceedings of the 20th International Conference on Intelligent User Interfaces},
year = {2015},
series = {IUI '15},
pages = {110--114},
address = {New York, NY, USA},
publisher = {ACM},
note = {buschek2015iui},
abstract = {Pointing tasks are commonly studied in HCI research, for example to evaluate and compare different interaction techniques or devices. A recent line of work has modelled user-specific touch behaviour with machine learning methods to reveal spatial targeting error patterns across the screen. These models can also be applied to improve accuracy of touchscreens and keyboards, and to recognise users and hand postures. However, no implementation of these techniques has been made publicly available yet, hindering broader use in research and practical deployments. Therefore, this paper presents a toolkit which implements such touch models for data analysis (Python), mobile applications (Java/Android), and the web (JavaScript). We demonstrate several applications, including hand posture recognition, on touch targeting data collected in a study with 24 participants. We consider different target types and hand postures, changing behaviour over time, and the influence of hand sizes.},
acmid = {2701381},
doi = {10.1145/2678025.2701381},
isbn = {978-1-4503-3306-1},
keywords = {gaussian process, machine learning, toolkit, touch},
location = {Atlanta, Georgia, USA},
numpages = {5},
timestamp = {2015.03.29},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2015iui.pdf},
}
Proceedings of the 4th international symposium on pervasive displays. New York, NY, USA: ACM, 2015.
[BibTeX] [PDF]
@Proceedings{gehring2015perdis,
title = {Proceedings of the 4th International Symposium on Pervasive Displays},
year = {2015},
address = {New York, NY, USA},
editor = {Sven Gehring and Antonio Krüger and Florian Alt and Nick Taylor and Stefan Schneegass},
isbn = {978-1-4503-3608-6},
note = {gehring2015perdis},
publisher = {ACM},
series = {PerDis '15},
location = {Saarbr{\"u}cken, Germany},
timestamp = {2015.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/gehring2015perdis.pdf},
}

### 2014

S. Schneegass, F. Steimle, A. Bulling, F. Alt, and A. Schmidt. SmudgeSafe: geometric image transformations for smudge-resistant user authentication. In Proceedings of the 2014 acm international joint conference on pervasive and ubiquitous computing (UbiComp ’14), ACM, New York, NY, USA, 2014, p. 775–786. doi:10.1145/2632048.2636090
[BibTeX] [Abstract] [PDF]
Touch-enabled user interfaces have become ubiquitous, such as on ATMs or portable devices. At the same time, authentication using touch input is problematic, since finger smudge traces may allow attackers to reconstruct passwords. We present SmudgeSafe, an authentication system that uses random geometric image transformations, such as translation, rotation, scaling, shearing, and flipping, to increase the security of cued-recall graphical passwords. We describe the design space of these transformations and report on two user studies: A lab-based security study involving 20 participants in attacking user-defined passwords, using high quality pictures of real smudge traces captured on a mobile phone display; and an in-the-field usability study with 374 participants who generated more than 130,000 logins on a mobile phone implementation of SmudgeSafe. Results show that SmudgeSafe significantly increases security compared to authentication schemes based on PINs and lock patterns, and exhibits very high learnability, efficiency, and memorability.
@InProceedings{schneegass2014ubicomp,
author = {Schneegass, Stefan and Steimle, Frank and Bulling, Andreas and Alt, Florian and Schmidt, Albrecht},
title = {{SmudgeSafe}: Geometric Image Transformations for Smudge-resistant User Authentication},
booktitle = {Proceedings of the 2014 ACM International Joint Conference on Pervasive and Ubiquitous Computing},
year = {2014},
series = {UbiComp '14},
pages = {775--786},
address = {New York, NY, USA},
publisher = {ACM},
note = {schneegass2014ubicomp},
abstract = {Touch-enabled user interfaces have become ubiquitous, such as on ATMs or portable devices. At the same time, authentication using touch input is problematic, since finger smudge traces may allow attackers to reconstruct passwords. We present SmudgeSafe, an authentication system that uses random geometric image transformations, such as translation, rotation, scaling, shearing, and flipping, to increase the security of cued-recall graphical passwords. We describe the design space of these transformations and report on two user studies: A lab-based security study involving 20 participants in attacking user-defined passwords, using high quality pictures of real smudge traces captured on a mobile phone display; and an in-the-field usability study with 374 participants who generated more than 130,000 logins on a mobile phone implementation of SmudgeSafe. Results show that SmudgeSafe significantly increases security compared to authentication schemes based on PINs and lock patterns, and exhibits very high learnability, efficiency, and memorability.},
acmid = {2636090},
doi = {10.1145/2632048.2636090},
isbn = {978-1-4503-2968-2},
keywords = {finger smudge traces, graphical passwords, touch input},
location = {Seattle, Washington},
numpages = {12},
timestamp = {2014.09.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schneegass2014ubicomp.pdf},
}
N. Broy, F. Alt, S. Schneegass, and B. Pfleging. 3d displays in cars: exploring the user performance for a stereoscopic instrument cluster. In Proceedings of the 6th international conference on automotive user interfaces and interactive vehicular applications (AutomotiveUI ’14), ACM, New York, NY, USA, 2014, p. 2:1–2:9. doi:10.1145/2667317.2667319
[BibTeX] [Abstract] [PDF]
In this paper, we investigate user performance for stereoscopic automotive user interfaces (UI). Our work is motivated by the fact that stereoscopic displays are about to find their way into cars. Such a safety-critical application area creates an inherent need to understand how the use of stereoscopic 3D visualizations impacts user performance. We conducted a comprehensive study with 56 participants to investigate the impact of a 3D instrument cluster (IC) on primary and secondary task performance. We investigated different visualizations (2D and 3D) and complexities (low vs. high amount of details) of the IC as well as two 3D display technologies (shutter vs. autostereoscopy). As secondary tasks the participants judged spatial relations between UI elements (expected events) and reacted on pop-up instructions (unexpected events) in the IC. The results show that stereoscopy increases accuracy for expected events, decreases task completion times for unexpected tasks, and increases the attractiveness of the interface. Furthermore, we found a significant influence of the used technology, indicating that secondary task performance improves for shutter displays.
@InProceedings{broy2014autoui,
author = {Broy, Nora and Alt, Florian and Schneegass, Stefan and Pfleging, Bastian},
title = {{3D} Displays in Cars: Exploring the User Performance for a Stereoscopic Instrument Cluster},
booktitle = {Proceedings of the 6th International Conference on Automotive User Interfaces and Interactive Vehicular Applications},
year = {2014},
series = {AutomotiveUI '14},
pages = {2:1--2:9},
address = {New York, NY, USA},
publisher = {ACM},
note = {broy2014autoui},
abstract = {In this paper, we investigate user performance for stereoscopic automotive user interfaces (UI). Our work is motivated by the fact that stereoscopic displays are about to find their way into cars. Such a safety-critical application area creates an inherent need to understand how the use of stereoscopic 3D visualizations impacts user performance. We conducted a comprehensive study with 56 participants to investigate the impact of a 3D instrument cluster (IC) on primary and secondary task performance. We investigated different visualizations (2D and 3D) and complexities (low vs. high amount of details) of the IC as well as two 3D display technologies (shutter vs. autostereoscopy). As secondary tasks the participants judged spatial relations between UI elements (expected events) and reacted on pop-up instructions (unexpected events) in the IC. The results show that stereoscopy increases accuracy for expected events, decreases task completion times for unexpected tasks, and increases the attractiveness of the interface. Furthermore, we found a significant influence of the used technology, indicating that secondary task performance improves for shutter displays.},
acmid = {2667319},
articleno = {2},
doi = {10.1145/2667317.2667319},
isbn = {978-1-4503-3212-5},
keywords = {Automotive UIs, stereoscopic 3D, user performance},
location = {Seattle, WA, USA},
numpages = {9},
timestamp = {2014.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2014autoui.pdf},
}
N. Broy, S. Höckh, A. Frederiksen, M. Gilowski, J. Eichhorn, F. Naser, H. Jung, J. Niemann, M. Schell, A. Schmid, and F. Alt. Exploring design parameters for a 3d head-up display. In Proceedings of the international symposium on pervasive displays (PerDis ’14), ACM, New York, NY, USA, 2014, p. 38:38–38:43. doi:10.1145/2611009.2611011
[BibTeX] [Abstract] [PDF]
Today, head-up displays (HUDs) are commonly used in cars to show basic driving information in the visual field of the viewer. This allows information to be perceived in a quick and easy to understand manner. With advances in technology, HUDs will allow richer information to be conveyed to the driver by exploiting the third dimension. We envision a stereoscopic HUD for displaying content in 3D space. This requires an understanding of how parallaxes impact the user’s performance and comfort, which is the focus of this work. In two user studies, involving 49 participants, we (a) gather insights into how projection distances and stereoscopic visualizations influence the comfort zone and (b) the depth judgment of the user. The results show that with larger projection distances both the comfort zone and the minimum comfortable viewing distance increase. Higher distances between the viewer and a real world object to be judged decrease the judgment accuracy.
@InProceedings{broy2014perdis,
author = {Broy, Nora and H{\"o}ckh, Simone and Frederiksen, Annette and Gilowski, Michael and Eichhorn, Julian and Naser, Felix and Jung, Horst and Niemann, Julia and Schell, Martin and Schmid, Albrecht and Alt, Florian},
title = {Exploring Design Parameters for a {3D} Head-Up Display},
booktitle = {Proceedings of The International Symposium on Pervasive Displays},
year = {2014},
series = {PerDis '14},
pages = {38:38--38:43},
address = {New York, NY, USA},
publisher = {ACM},
note = {broy2014perdis},
abstract = {Today, head-up displays (HUDs) are commonly used in cars to show basic driving information in the visual field of the viewer. This allows information to be perceived in a quick and easy to understand manner. With advances in technology, HUDs will allow richer information to be conveyed to the driver by exploiting the third dimension. We envision a stereoscopic HUD for displaying content in 3D space. This requires an understanding of how parallaxes impact the user's performance and comfort, which is the focus of this work. In two user studies, involving 49 participants, we (a) gather insights into how projection distances and stereoscopic visualizations influence the comfort zone and (b) the depth judgment of the user. The results show that with larger projection distances both the comfort zone and the minimum comfortable viewing distance increase. Higher distances between the viewer and a real world object to be judged decrease the judgment accuracy.},
acmid = {2611011},
articleno = {38},
doi = {10.1145/2611009.2611011},
isbn = {978-1-4503-2952-1},
keywords = {3D Displays, Automotive UIs, Head-Up Displays, Human Factors},
location = {Copenhagen, Denmark},
numpages = {6},
timestamp = {2014.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2014perdis.pdf},
}
S. Schneegass and F. Alt. SenScreen: a toolkit for supporting sensor-enabled multi-display networks. In Proceedings of the international symposium on pervasive displays (PerDis ’14), ACM, New York, NY, USA, 2014, p. 92:92–92:97. doi:10.1145/2611009.2611017
[BibTeX] [Abstract] [PDF]
Over the past years, a number of sensors have emerged, that enable gesture-based interaction with public display applications, including Microsoft Kinect, Asus Xtion, and Leap Motion. In this way, interaction with displays can be made more attractive, particularly if deployed across displays hence involving many users. However, interactive applications are still scarce, which can be attributed to the fact that developers usually need to implement a low-level connection to the sensor. In this work, we tackle this issue by presenting a toolkit, called SenScreen, consisting of (a) easy-to-install adapters that handle the low-level connection to sensors and provides the data via (b) an API that allows developers to write their applications in JavaScript. We evaluate our approach by letting two groups of developers create an interactive game each using our toolkit. Observation, interviews, and questionnaire indicate that our toolkit simplifies the implementation of interactive applications and may, hence, serve as a first step towards a more widespread use of interactive public displays.
@InProceedings{schneegass2014perdis2,
author = {Schneegass, Stefan and Alt, Florian},
title = {{SenScreen}: A Toolkit for Supporting Sensor-enabled Multi-Display Networks},
booktitle = {Proceedings of The International Symposium on Pervasive Displays},
year = {2014},
series = {PerDis '14},
pages = {92:92--92:97},
address = {New York, NY, USA},
publisher = {ACM},
note = {schneegass2014perdis2},
abstract = {Over the past years, a number of sensors have emerged, that enable gesture-based interaction with public display applications, including Microsoft Kinect, Asus Xtion, and Leap Motion. In this way, interaction with displays can be made more attractive, particularly if deployed across displays hence involving many users. However, interactive applications are still scarce, which can be attributed to the fact that developers usually need to implement a low-level connection to the sensor. In this work, we tackle this issue by presenting a toolkit, called SenScreen, consisting of (a) easy-to-install adapters that handle the low-level connection to sensors and provides the data via (b) an API that allows developers to write their applications in JavaScript. We evaluate our approach by letting two groups of developers create an interactive game each using our toolkit. Observation, interviews, and questionnaire indicate that our toolkit simplifies the implementation of interactive applications and may, hence, serve as a first step towards a more widespread use of interactive public displays.},
acmid = {2611017},
articleno = {92},
doi = {10.1145/2611009.2611017},
isbn = {978-1-4503-2952-1},
keywords = {Interactive Applications, Public Display Architecture, Toolkits},
location = {Copenhagen, Denmark},
numpages = {6},
timestamp = {2014.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schneegass2014perdis2.pdf},
}
F. Steinberger, M. Foth, and F. Alt. Vote with your feet: local community polling on urban screens. In Proceedings of the international symposium on pervasive displays (PerDis ’14), ACM, New York, NY, USA, 2014, p. 44:44–44:49. doi:10.1145/2611009.2611015
[BibTeX] [Abstract] [PDF]
Falling prices have led to an ongoing spread of public displays in urban areas. Still, they mostly show passive content such as commercials and digital signage. At the same time, technological advances have enabled the creation of interactive displays potentially increasing their attractiveness for the audience, e.g. through providing a platform for civic discourse. This poses considerable challenges, since displays need to communicate the opportunity to engage, motivate the audience to do so, and be easy to use. In this paper we present Vote With Your Feet, a hyperlocal public polling tool for urban screens allowing users to express their opinions. Similar to vox populi interviews on TV or polls on news websites, the tool is meant to reflect the mindset of the community on topics such as current affairs, cultural identity and local matters. It is novel in that it focuses on a situated civic discourse and provides a tangible user interface, tackling the mentioned challenges. It shows one Yes/No question at a time and enables users to vote by stepping on one of two tangible buttons on the ground. This user interface was introduced to attract people’s attention and to lower participation barriers. Our field study showed that Vote With Your Feet is perceived as inviting and that it can spark discussions among co-located people.
@inproceedings{steinberger2014perdis,
  author    = {Steinberger, Fabius and Foth, Marcus and Alt, Florian},
  title     = {Vote With Your Feet: Local Community Polling on Urban Screens},
  booktitle = {Proceedings of The International Symposium on Pervasive Displays},
  series    = {PerDis '14},
  year      = {2014},
  pages     = {44:44--44:49},
  articleno = {44},
  numpages  = {6},
  location  = {Copenhagen, Denmark},
  address   = {New York, NY, USA},
  publisher = {ACM},
  isbn      = {978-1-4503-2952-1},
  doi       = {10.1145/2611009.2611015},
  acmid     = {2611015},
  keywords  = {Polling, civic engagement, public displays, tangible media, ubiquitous computing, urban computing, urban informatics, voting},
  abstract  = {Falling prices have led to an ongoing spread of public displays in urban areas. Still, they mostly show passive content such as commercials and digital signage. At the same time, technological advances have enabled the creation of interactive displays potentially increasing their attractiveness for the audience, e.g. through providing a platform for civic discourse. This poses considerable challenges, since displays need to communicate the opportunity to engage, motivate the audience to do so, and be easy to use. In this paper we present Vote With Your Feet, a hyperlocal public polling tool for urban screens allowing users to express their opinions. Similar to vox populi interviews on TV or polls on news websites, the tool is meant to reflect the mindset of the community on topics such as current affairs, cultural identity and local matters. It is novel in that it focuses on a situated civic discourse and provides a tangible user interface, tackling the mentioned challenges. It shows one Yes/No question at a time and enables users to vote by stepping on one of two tangible buttons on the ground. This user interface was introduced to attract people's attention and to lower participation barriers. Our field study showed that Vote With Your Feet is perceived as inviting and that it can spark discussions among co-located people.},
  note      = {steinberger2014perdis},
  timestamp = {2014.06.11},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/steinberger2014perdis.pdf},
}
S. Schneegass, F. Alt, J. Scheible, and A. Schmidt. Midair displays: concept and first experiences with free-floating pervasive displays. In Proceedings of the international symposium on pervasive displays (PerDis ’14), ACM, New York, NY, USA, 2014, p. 27:27–27:31. doi:10.1145/2611009.2611013
[BibTeX] [Abstract] [PDF]
Due to advances in technology, displays could replace literally any surface in the future, including walls, windows, and ceilings. At the same time, midair remains a relatively unexplored domain for the use of displays as of today, particularly in public space. Nevertheless, we see large potential in the ability to make displays appear at any possible point in space, both indoors and outdoors. Such displays, that we call midair displays, could control large crowds in emergency situations, they could be used during sports for navigation and feedback on performance, or as group displays. We see midair displays as a complementary technology to wearable displays. In contrast to statically deployed displays they allow information to be brought to the user anytime and anywhere. We explore the concept of midair displays and show that with current technology, e.g., copter drones, such displays can be easily built. A study on the readability of such displays showcases the potential and feasibility of the concept and provides early insights.
@inproceedings{schneegass2014perdis1,
  author    = {Schneegass, Stefan and Alt, Florian and Scheible, J\"{u}rgen and Schmidt, Albrecht},
  title     = {Midair Displays: Concept and First Experiences with Free-Floating Pervasive Displays},
  booktitle = {Proceedings of The International Symposium on Pervasive Displays},
  series    = {PerDis '14},
  year      = {2014},
  pages     = {27:27--27:31},
  articleno = {27},
  numpages  = {5},
  location  = {Copenhagen, Denmark},
  address   = {New York, NY, USA},
  publisher = {ACM},
  isbn      = {978-1-4503-2952-1},
  doi       = {10.1145/2611009.2611013},
  acmid     = {2611013},
  keywords  = {Drones, Free-Floating Displays, Interaction Techniques, Midair Displays, Pervasive Display},
  abstract  = {Due to advances in technology, displays could replace literally any surface in the future, including walls, windows, and ceilings. At the same time, midair remains a relatively unexplored domain for the use of displays as of today, particularly in public space. Nevertheless, we see large potential in the ability to make displays appear at any possible point in space, both indoors and outdoors. Such displays, that we call midair displays, could control large crowds in emergency situations, they could be used during sports for navigation and feedback on performance, or as group displays. We see midair displays as a complementary technology to wearable displays. In contrast to statically deployed displays they allow information to be brought to the user anytime and anywhere. We explore the concept of midair displays and show that with current technology, e.g., copter drones, such displays can be easily built. A study on the readability of such displays showcases the potential and feasibility of the concept and provides early insights.},
  note      = {schneegass2014perdis1},
  timestamp = {2014.06.11},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/schneegass2014perdis1.pdf},
}
J. R. Häkkilä, M. Posti, S. Schneegass, F. Alt, K. Gultekin, and A. Schmidt. Let me catch this!: experiencing interactive 3d cinema through collecting content with a mobile phone. In Proceedings of the 32nd annual acm conference on human factors in computing systems (CHI ’14), ACM, New York, NY, USA, 2014, p. 1011–1020. doi:10.1145/2556288.2557187
[BibTeX] [Abstract] [PDF]
The entertainment industry is going through a transformation, and technology development is affecting how we can enjoy and interact with the entertainment media content in new ways. In our work, we explore how to enable interaction with content in the context of 3D cinemas. This allows viewers to use their mobile phone to retrieve, for example, information on the artist of the soundtrack currently playing or a discount coupon on the watch the main actor is wearing. We are particularly interested in the user experience of the interactive 3D cinema concept, and how different interactive elements and interaction techniques are perceived. We report on the development of a prototype application utilizing smart phones and on an evaluation in a cinema context with 20 participants. Results emphasize that designing for interactive cinema experiences should drive for holistic and positive user experiences. Interactive content should be tied together with the actual video content, but integrated into contexts where it does not conflict with the immersive experience with the movie.
@InProceedings{hakkila2014chi,
author = {H{\"a}kkil{\"a}, Jonna R. and Posti, Maaret and Schneegass, Stefan and Alt, Florian and Gultekin, Kunter and Schmidt, Albrecht},
title = {Let Me Catch This!: Experiencing Interactive {3D} Cinema Through Collecting Content with a Mobile Phone},
booktitle = {Proceedings of the 32nd Annual ACM Conference on Human Factors in Computing Systems},
year = {2014},
series = {CHI '14},
pages = {1011--1020},
address = {New York, NY, USA},
publisher = {ACM},
note = {hakkila2014chi},
abstract = {The entertainment industry is going through a transformation, and technology development is affecting how we can enjoy and interact with the entertainment media content in new ways. In our work, we explore how to enable interaction with content in the context of 3D cinemas. This allows viewers to use their mobile phone to retrieve, for example, information on the artist of the soundtrack currently playing or a discount coupon on the watch the main actor is wearing. We are particularly interested in the user experience of the interactive 3D cinema concept, and how different interactive elements and interaction techniques are perceived. We report on the development of a prototype application utilizing smart phones and on an evaluation in a cinema context with 20 participants. Results emphasize that designing for interactive cinema experiences should drive for holistic and positive user experiences. Interactive content should be tied together with the actual video content, but integrated into contexts where it does not conflict with the immersive experience with the movie.},
acmid = {2557187},
doi = {10.1145/2556288.2557187},
isbn = {978-1-4503-2473-1},
keywords = {3d, interactive cinema, mobile phone interaction, user experience, user studies},
location = {Toronto, Ontario, Canada},
numpages = {10},
timestamp = {2014.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/hakkila2014chi.pdf},
}
M. Greis, F. Alt, N. Henze, and N. Memarovic. I can wait a minute: uncovering the optimal delay time for pre-moderated user-generated content on public displays. In Proceedings of the sigchi conference on human factors in computing systems (CHI ’14), ACM, New York, NY, USA, 2014, p. 1435–1438. doi:10.1145/2556288.2557186
[BibTeX] [Abstract] [PDF]
Public displays have advanced from isolated and non interactive “ad” displays which show images and videos to displays that are networked, interactive, and open to a wide variety of content and applications. Prior work has shown large potential of user-generated content on public displays. However, one of the problems with user-generated content on public displays is moderation as content may be explicit or troublesome for a particular location. In this work we explore the expectations of users with regard to content moderation on public displays. An online survey revealed that people not only think that display content should be moderated but also that a delay of up to 10 minutes is acceptable if display content is moderated. In a subsequent in the wild deployment we compared different moderation delays. We found that a moderation delay significantly decreases the number of user-generated posts while at the same time there is no significant effect on users’ decision to repeatedly post on the display.
@inproceedings{greis2014chi,
  author    = {Greis, Miriam and Alt, Florian and Henze, Niels and Memarovic, Nemanja},
  title     = {I Can Wait a Minute: Uncovering the Optimal Delay Time for Pre-moderated User-generated Content on Public Displays},
  booktitle = {Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
  series    = {CHI '14},
  year      = {2014},
  pages     = {1435--1438},
  numpages  = {4},
  address   = {New York, NY, USA},
  publisher = {ACM},
  isbn      = {978-1-4503-2473-1},
  doi       = {10.1145/2556288.2557186},
  acmid     = {2557186},
  keywords  = {content moderation, public displays, twitter},
  abstract  = {Public displays have advanced from isolated and non interactive "ad" displays which show images and videos to displays that are networked, interactive, and open to a wide variety of content and applications. Prior work has shown large potential of user-generated content on public displays. However, one of the problems with user-generated content on public displays is moderation as content may be explicit or troublesome for a particular location. In this work we explore the expectations of users with regard to content moderation on public displays. An online survey revealed that people not only think that display content should be moderated but also that a delay of up to 10 minutes is acceptable if display content is moderated. In a subsequent in the wild deployment we compared different moderation delays. We found that a moderation delay significantly decreases the number of user-generated posts while at the same time there is no significant effect on users' decision to repeatedly post on the display.},
  note      = {greis2014chi},
  timestamp = {2014.05.01},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/greis2014chi.pdf},
}
N. Broy, S. Schneegass, F. Alt, and A. Schmidt. Framebox and mirrorbox: tools and guidelines to support designers in prototyping interfaces for 3d displays. In Proceedings of the 32nd annual acm conference on human factors in computing systems (CHI ’14), ACM, New York, NY, USA, 2014, p. 2037–2046. doi:10.1145/2556288.2557183
[BibTeX] [Abstract] [PDF]
In this paper, we identify design guidelines for stereoscopic 3D (S3D) user interfaces (UIs) and present the MirrorBox and the FrameBox, two UI prototyping tools for S3D displays. As auto-stereoscopy becomes available for the mass market we believe the design of S3D UIs for devices, for example, mobile phones, public displays, or car dashboards, will rapidly gain importance. A benefit of such UIs is that they can group and structure information in a way that makes them easily perceivable for the user. For example, important information can be shown in front of less important information. This paper identifies core requirements for designing S3D UIs and derives concrete guidelines. The requirements also serve as a basis for two depth layout tools we built with the aim to overcome limitations of traditional prototyping when sketching S3D UIs. We evaluated the tools with usability experts and compared them to traditional paper prototyping.
@InProceedings{broy2014chi,
author = {Broy, Nora and Schneegass, Stefan and Alt, Florian and Schmidt, Albrecht},
title = {{FrameBox} and {MirrorBox}: Tools and Guidelines to Support Designers in Prototyping Interfaces for {3D} Displays},
booktitle = {Proceedings of the 32nd Annual ACM Conference on Human Factors in Computing Systems},
year = {2014},
series = {CHI '14},
pages = {2037--2046},
address = {New York, NY, USA},
publisher = {ACM},
note = {broy2014chi},
abstract = {In this paper, we identify design guidelines for stereoscopic 3D (S3D) user interfaces (UIs) and present the MirrorBox and the FrameBox, two UI prototyping tools for S3D displays. As auto-stereoscopy becomes available for the mass market we believe the design of S3D UIs for devices, for example, mobile phones, public displays, or car dashboards, will rapidly gain importance. A benefit of such UIs is that they can group and structure information in a way that makes them easily perceivable for the user. For example, important information can be shown in front of less important information. This paper identifies core requirements for designing S3D UIs and derives concrete guidelines. The requirements also serve as a basis for two depth layout tools we built with the aim to overcome limitations of traditional prototyping when sketching S3D UIs. We evaluated the tools with usability experts and compared them to traditional paper prototyping.},
acmid = {2557183},
doi = {10.1145/2556288.2557183},
isbn = {978-1-4503-2473-1},
keywords = {prototyping, stereoscopic 3d, user interfaces},
location = {Toronto, Ontario, Canada},
numpages = {10},
timestamp = {2014.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2014chi.pdf},
}
N. Davies, S. Clinch, and F. Alt, Pervasive Displays – Understanding the Future of Digital Signage, Morgan and Claypool Publishers, 2014.
[BibTeX] [Abstract] [PDF]
Fueled by falling display hardware costs and rising demand, digital signage and pervasive displaysare becoming ever more ubiquitous. Such systems have traditionally been used for advertising andinformation dissemination with digital signage commonplace in shopping malls, airports and publicspaces.While advertising and broadcasting announcements remain important applications, developmentsin sensing and interaction technologies are enabling entirely newclasses of display applicationsthat tailor content to the situation and audience of the display. As a result, signage systems arebeginning to transition from simple broadcast systems to rich platforms for communication andinteraction.In this lecture we provide an introduction to this emerging field for researchers and practitionersinterested in creating state-of-the-art pervasive display systems. We begin by describingthe history of pervasive display research, providing illustrations of key systems, from pioneeringwork on supporting collaboration to contemporary systems designed for personalized informationdelivery.We then consider what the near-future might hold for display networks—describing a seriesof compelling applications that are being postulated for future display networks. Creating suchsystems raises a wide range of challenges and requires designers to make a series of important tradeoffs.We dedicate four chapters to key aspects of pervasive display design: audience engagement,display interaction, system software and system evaluation. These chapters provide an overview ofcurrent thinking in each area. Finally, we present a series of case studies of display systems and ourconcluding remarks.
@Book{davies2014synthesis,
title = {{Pervasive Displays -- Understanding the Future of Digital Signage}},
publisher = {Morgan and Claypool Publishers},
year = {2014},
author = {Nigel Davies and Sarah Clinch and Florian Alt},
series = {Synthesis Lectures},
note = {davies2014synthesis},
abstract = {Fueled by falling display hardware costs and rising demand, digital signage and pervasive displays are becoming ever more ubiquitous. Such systems have traditionally been used for advertising and information dissemination with digital signage commonplace in shopping malls, airports and public spaces. While advertising and broadcasting announcements remain important applications, developments in sensing and interaction technologies are enabling entirely new classes of display applications that tailor content to the situation and audience of the display. As a result, signage systems are beginning to transition from simple broadcast systems to rich platforms for communication and interaction. In this lecture we provide an introduction to this emerging field for researchers and practitioners interested in creating state-of-the-art pervasive display systems. We begin by describing the history of pervasive display research, providing illustrations of key systems, from pioneering work on supporting collaboration to contemporary systems designed for personalized information delivery. We then consider what the near-future might hold for display networks—describing a series of compelling applications that are being postulated for future display networks. Creating such systems raises a wide range of challenges and requires designers to make a series of important tradeoffs. We dedicate four chapters to key aspects of pervasive display design: audience engagement, display interaction, system software and system evaluation. These chapters provide an overview of current thinking in each area. Finally, we present a series of case studies of display systems and our concluding remarks.},
timestamp = {2014.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/davies2014synthesis.pdf},
}
N. Broy, B. J. Zierer, S. Schneegass, and F. Alt. Exploring virtual depth for automotive instrument cluster concepts. In Proceedings of the extended abstracts of the 32nd annual acm conference on human factors in computing systems (CHI EA ’14), ACM, New York, NY, USA, 2014, p. 1783–1788. doi:10.1145/2559206.2581362
[BibTeX] [Abstract] [PDF]
This paper compares the user experience of three novel concept designs for 3D-based car dashboards. Our work is motivated by the fact that analogue dashboards are currently being replaced by their digital counterparts. At the same time, auto-stereoscopic displays enter the market, allowing the quality of novel dashboards to be increased, both with regard to the perceived quality and in supporting the driving task. Since no guidelines or principles exist for the design of digital 3D dashboards, we take an initial step in designing and evaluating such interfaces. In a study with 12 participants we were able to show that stereoscopic 3D increases the perceived quality of the display while motion parallax leads to a rather disturbing experience.
@InProceedings{broy2014chiea,
author = {Broy, Nora and Zierer, Benedikt J. and Schneegass, Stefan and Alt, Florian},
title = {Exploring Virtual Depth for Automotive Instrument Cluster Concepts},
booktitle = {Proceedings of the Extended Abstracts of the 32nd Annual ACM Conference on Human Factors in Computing Systems},
year = {2014},
series = {CHI EA '14},
pages = {1783--1788},
address = {New York, NY, USA},
publisher = {ACM},
note = {broy2014chiea},
abstract = {This paper compares the user experience of three novel concept designs for 3D-based car dashboards. Our work is motivated by the fact that analogue dashboards are currently being replaced by their digital counterparts. At the same time, auto-stereoscopic displays enter the market, allowing the quality of novel dashboards to be increased, both with regard to the perceived quality and in supporting the driving task. Since no guidelines or principles exist for the design of digital 3D dashboards, we take an initial step in designing and evaluating such interfaces. In a study with 12 participants we were able to show that stereoscopic 3D increases the perceived quality of the display while motion parallax leads to a rather disturbing experience.},
acmid = {2581362},
doi = {10.1145/2559206.2581362},
isbn = {978-1-4503-2474-8},
keywords = {automotive user interfaces, motion parallax, stereoscopic 3d, user experience},
numpages = {6},
timestamp = {2014.04.27},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2014chiea.pdf},
}
S. Schneegass, F. Alt, J. Scheible, A. Schmidt, and H. Su. Midair displays: exploring the concept of free-floating public displays. In Chi ’14 extended abstracts on human factors in computing systems (CHI EA ’14), ACM, New York, NY, USA, 2014, p. 2035–2040. doi:10.1145/2559206.2581190
[BibTeX] [Abstract] [PDF]
Due to advances in technology, displays could replace literally any surface in the future, including walls, windows, and ceilings. At the same time, midair remains a relatively unexplored domain for the use of displays as of today, particularly in public spaces. Nevertheless, we see large potential in the ability to make displays appear at any possible point in space, both indoors and outdoors. Such displays, that we call midair displays, could control large crowds in emergency situations, they could be used during sports for navigation and feedback on performance, or they could be used as group displays which enable information to be brought to the user anytime and anywhere. We explore the concept of midair displays and show that with current technology, for example copter drones, such displays can be easily built.
@inproceedings{schneegass2014chiea,
  author    = {Schneegass, Stefan and Alt, Florian and Scheible, J\"{u}rgen and Schmidt, Albrecht and Su, Haifeng},
  title     = {Midair Displays: Exploring the Concept of Free-floating Public Displays},
  booktitle = {CHI '14 Extended Abstracts on Human Factors in Computing Systems},
  series    = {CHI EA '14},
  year      = {2014},
  pages     = {2035--2040},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/2559206.2581190},
  isbn      = {978-1-4503-2474-8},
  acmid     = {2581190},
  keywords  = {drones, midair displays, public displays},
  numpages  = {6},
  abstract  = {Due to advances in technology, displays could replace literally any surface in the future, including walls, windows, and ceilings. At the same time, midair remains a relatively unexplored domain for the use of displays as of today, particularly in public spaces. Nevertheless, we see large potential in the ability to make displays appear at any possible point in space, both indoors and outdoors. Such displays, that we call midair displays, could control large crowds in emergency situations, they could be used during sports for navigation and feedback on performance, or they could be used as group displays which enable information to be brought to the user anytime and anywhere. We explore the concept of midair displays and show that with current technology, for example copter drones, such displays can be easily built.},
  note      = {schneegass2014chiea},
  timestamp = {2014.04.27},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/schneegass2014chiea.pdf},
}
M. Pfeiffer, S. Schneegass, F. Alt, and M. Rohs. A Design Space for Electrical Muscle Stimulation Feedback for Freehand Interaction. In Proceedings of the First CHI Workshop on Assistive Augmentation (Assistive Augmentation ’14), 2014.
[BibTeX] [Abstract] [PDF]
Free-hand interaction becomes a common technique for interacting with large displays. At the same time, providing haptic feedback for free-hand interaction is still a challenge, particularly feedback with different characteristics (i.e., strengths, patterns) to convey particular information. We see electrical muscle stimulation (EMS) as a well-suited technology for providing haptic feedback in this domain. The characteristics of EMS can be used to assist users in learning, manipulating, and perceiving virtual objects. One of the core challenges is to understand these characteristics and how they can be applied. As a step in this direction, this paper presents a design space that identifies different aspects of using EMS for haptic feedback. The design space is meant as a basis for future research investigating how particular characteristics can be exploited to provide specific haptic feedback.
@InProceedings{pfeiffer2014asstech,
author = {Pfeiffer, Max and Schneegass, Stefan and Alt, Florian and Rohs, Michael},
title = {{A Design Space for Electrical Muscle Stimulation Feedback for Freehand Interaction}},
booktitle = {{Proceedings of the First CHI Workshop on Assistive Augmentation}},
year = {2014},
series = {Assistive Augmentation '14},
note = {pfeiffer2014asstech},
abstract = {Free-hand interaction becomes a common technique for interacting with large displays. At the same time, providing haptic feedback for free-hand interaction is still a challenge, particularly feedback with different characteristics (i.e., strengths, patterns) to convey particular information. We see electrical muscle stimulation (EMS) as a well-suited technology for providing haptic feedback in this domain. The characteristics of EMS can be used to assist users in learning, manipulating, and perceiving virtual objects. One of the core challenges is to understand these characteristics and how they can be applied. As a step in this direction, this paper presents a design space that identifies different aspects of using EMS for haptic feedback. The design space is meant as a basis for future research investigating how particular characteristics can be exploited to provide specific haptic feedback.},
numpages = {6},
timestamp = {2014.04.26},
url = {http://www.florian-alt.org/unibw/wp-content/publications/pfeiffer2014asstech.pdf},
}
F. Alt, N. Memarovic, M. Greis, and N. Henze. UniDisplay – A Research Prototype to Investigate Expectations Towards Public Display Applications. In Proceedings of the 1st Workshop on Developing Applications for Pervasive Display Networks (PD-Apps ’14), IEEE, 2014.
[BibTeX] [Abstract] [PDF]
As public display networks become open, novel types of interaction applications emerge. In particular, we expect applications that support user-generated content to rapidly gain importance, since they provide a tangible benefit for the user in the form of digital bulletin boards, discussion platform that foster public engagement, and applications that allow for self-expression. At the same time, such applications infer several challenges: first, they need to provide suitable means for the passerby to contribute content to the application; second, mechanisms need to be employed that provide sufficient control for the display owner with regard to content moderation; and third, the users’ expectations with regard to the posting procedure needs to be well understood. In this paper we present UniDisplay, a research prototype that enables users to post text and images to a public display. We report on the design and development of the application and provide early insights of the deployment in a University setting.
@InProceedings{alt2014pdapps,
author = {Alt, Florian and Memarovic, Nemanja and Greis, Miriam and Henze, Niels},
title = {{UniDisplay -- A Research Prototype to Investigate Expectations Towards Public Display Applications}},
booktitle = {{Proceedings of the 1st Workshop on Developing Applications for Pervasive Display Networks}},
year = {2014},
series = {PD-Apps '14},
publisher = {IEEE},
note = {alt2014pdapps},
abstract = {As public display networks become open, novel types of interaction applications emerge. In particular, we expect applications that support user-generated content to rapidly gain importance, since they provide a tangible benefit for the user in the form of digital bulletin boards, discussion platform that foster public engagement, and applications that allow for self-expression. At the same time, such applications infer several challenges: first, they need to provide suitable means for the passerby to contribute content to the application; second, mechanisms need to be employed that provide sufficient control for the display owner with regard to content moderation; and third, the users’ expectations with regard to the posting procedure needs to be well understood. In this paper we present UniDisplay, a research prototype that enables users to post text and images to a public display. We report on the design and development of the application and provide early insights of the deployment in a University setting.},
keywords = {UniDisplay, Public Displays},
location = {Budapest, Hungary},
numpages = {6},
owner = {florianalt},
timestamp = {2014.03.28},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2014pdapps.pdf},
}
M. Pfeiffer, S. Schneegass, F. Alt, and M. Rohs. Let me grab this: a comparison of ems and vibration for haptic feedback in free-hand interaction. In Proceedings of the 5th augmented human international conference (AH ’14), ACM, New York, NY, USA, 2014, p. 48:1–48:8. doi:10.1145/2582051.2582099
[BibTeX] [Abstract] [PDF]
Free-hand interaction with large displays is getting more common, for example in public settings and exertion games. Adding haptic feedback offers the potential for more realistic and immersive experiences. While vibrotactile feedback is well known, electrical muscle stimulation (EMS) has not yet been explored in free-hand interaction with large displays. EMS offers a wide range of different strengths and qualities of haptic feedback. In this paper we first systematically investigate the design space for haptic feedback. Second, we experimentally explore differences between strengths of EMS and vibrotactile feedback. Third, based on the results, we evaluate EMS and vibrotactile feedback with regard to different virtual objects (soft, hard) and interaction with different gestures (touch, grasp, punch) in front of a large display. The results provide a basis for the design of haptic feedback that is appropriate for the given type of interaction and the material.
@inproceedings{pfeiffer2014ah,
  author    = {Pfeiffer, Max and Schneegass, Stefan and Alt, Florian and Rohs, Michael},
  title     = {Let Me Grab This: A Comparison of EMS and Vibration for Haptic Feedback in Free-hand Interaction},
  booktitle = {Proceedings of the 5th Augmented Human International Conference},
  series    = {AH '14},
  year      = {2014},
  pages     = {48:1--48:8},
  articleno = {48},
  publisher = {ACM},
  address   = {New York, NY, USA},
  location  = {Kobe, Japan},
  doi       = {10.1145/2582051.2582099},
  isbn      = {978-1-4503-2761-9},
  acmid     = {2582099},
  keywords  = {electrical muscle stimulation, free-hand interaction, haptic feedback, large displays, tactile feedback},
  numpages  = {8},
  abstract  = {Free-hand interaction with large displays is getting more common, for example in public settings and exertion games. Adding haptic feedback offers the potential for more realistic and immersive experiences. While vibrotactile feedback is well known, electrical muscle stimulation (EMS) has not yet been explored in free-hand interaction with large displays. EMS offers a wide range of different strengths and qualities of haptic feedback. In this paper we first systematically investigate the design space for haptic feedback. Second, we experimentally explore differences between strengths of EMS and vibrotactile feedback. Third, based on the results, we evaluate EMS and vibrotactile feedback with regard to different virtual objects (soft, hard) and interaction with different gestures (touch, grasp, punch) in front of a large display. The results provide a basis for the design of haptic feedback that is appropriate for the given type of interaction and the material.},
  note      = {pfeiffer2014ah},
  timestamp = {2014.03.07},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/pfeiffer2014ah.pdf},
}
F. Alt, S. Schneegass, J. Auda, R. Rzayev, and N. Broy. Using eye-tracking to support interaction with layered 3d interfaces on stereoscopic displays. In Proceedings of the 19th international conference on intelligent user interfaces (IUI ’14), ACM, New York, NY, USA, 2014, p. 267–272. doi:10.1145/2557500.2557518
[BibTeX] [Abstract] [PDF]
In this paper, we investigate the concept of gaze-based interaction with 3D user interfaces. We currently see stereo vision displays becoming ubiquitous, particularly as auto-stereoscopy enables the perception of 3D content without the use of glasses. As a result, application areas for 3D beyond entertainment in cinema or at home emerge, including work settings, mobile phones, public displays, and cars. At the same time, eye tracking is hitting the consumer market with low-cost devices. We envision eye trackers in the future to be integrated with consumer devices (laptops, mobile phones, displays), hence allowing the user’s gaze to be analyzed and used as input for interactive applications. A particular challenge when applying this concept to 3D displays is that current eye trackers provide the gaze point in 2D only (x and y coordinates). In this paper, we compare the performance of two methods that use the eye’s physiology for calculating the gaze point in 3D space, hence enabling gaze-based interaction with stereoscopic content. Furthermore, we provide a comparison of gaze interaction in 2D and 3D with regard to user experience and performance. Our results show that with current technology, eye tracking on stereoscopic displays is possible with similar performance as on standard 2D screens.
@inproceedings{alt2014iui,
  author    = {Alt, Florian and Schneegass, Stefan and Auda, Jonas and Rzayev, Rufat and Broy, Nora},
  title     = {Using Eye-tracking to Support Interaction with Layered 3D Interfaces on Stereoscopic Displays},
  booktitle = {Proceedings of the 19th International Conference on Intelligent User Interfaces},
  series    = {IUI '14},
  year      = {2014},
  pages     = {267--272},
  publisher = {ACM},
  address   = {New York, NY, USA},
  location  = {Haifa, Israel},
  doi       = {10.1145/2557500.2557518},
  isbn      = {978-1-4503-2184-6},
  acmid     = {2557518},
  keywords  = {3d, eye tracking, gaze interaction, stereoscopic displays},
  numpages  = {6},
  abstract  = {In this paper, we investigate the concept of gaze-based interaction with 3D user interfaces. We currently see stereo vision displays becoming ubiquitous, particularly as auto-stereoscopy enables the perception of 3D content without the use of glasses. As a result, application areas for 3D beyond entertainment in cinema or at home emerge, including work settings, mobile phones, public displays, and cars. At the same time, eye tracking is hitting the consumer market with low-cost devices. We envision eye trackers in the future to be integrated with consumer devices (laptops, mobile phones, displays), hence allowing the user's gaze to be analyzed and used as input for interactive applications. A particular challenge when applying this concept to 3D displays is that current eye trackers provide the gaze point in 2D only (x and y coordinates). In this paper, we compare the performance of two methods that use the eye's physiology for calculating the gaze point in 3D space, hence enabling gaze-based interaction with stereoscopic content. Furthermore, we provide a comparison of gaze interaction in 2D and 3D with regard to user experience and performance. Our results show that with current technology, eye tracking on stereoscopic displays is possible with similar performance as on standard 2D screens.},
  note      = {alt2014iui},
  timestamp = {2014.02.24},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/alt2014iui.pdf},
}

### 2013

F. Alt, A Design Space for Pervasive Advertising on Public Displays, Stuttgart, Germany: Ph.D. Thesis, 2013.
[BibTeX] [Abstract] [PDF]
@PhdThesis{alt2013diss,
author = {Alt, Florian},
title = {{A Design Space for Pervasive Advertising on Public Displays}},
school = {University of Stuttgart},
year = {2013},
address = {Stuttgart, Germany},
note = {alt2013diss},
timestamp = {2013.12.07},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2013diss.pdf},
}
F. Alt and B. Pfleging. Sonify – A Platform for the Sonification of Text Messages. In Proceedings of Mensch & Computer 2013, 2013.
[BibTeX] [Abstract] [PDF]
Sonification of text messages offers a great potential for personalization while at the same time allowing rich information to be mediated. For example, ringtones are the major form of personalization on smartphones besides apps and background images. Ringtones are often used as a form of self-expression by the smartphone owner (e.g., using ones favorite sound track as standard ringtone), but also to identify the caller or sender of a message (e.g., the user knows who is calling without taking the phone out of the pocket). We believe this approach to be applicable to a wide variety of text messages, such as SMS, email, or IM. In this paper, we first present a web-based platform that allows user-generated mappings for text sonification to be created and managed. An API enables any application to send a text message and receive the sonification in the form of a MIDI file. To showcase the potential, we implemented an Android app that sonifies incoming SMS. Second, we evaluate the feasibility of our approach and show that sonified messages are equally effective as ringtones when conveying meta-information.
@InProceedings{alt2013muc,
author = {Florian Alt and Bastian Pfleging},
title = {{Sonify -- A Platform for the Sonification of Text Messages}},
booktitle = {{Proceedings of Mensch \& Computer 2013}},
year = {2013},
note = {alt2013muc},
abstract = {Sonification of text messages offers a great potential for personalization while at the same time allowing rich information to be mediated. For example, ringtones are the major form of personalization on smartphones besides apps and background images. Ringtones are often used as a form of self-expression by the smartphone owner (e.g., using ones favorite sound track as standard ringtone), but also to identify the caller or sender of a message (e.g., the user knows who is calling without taking the phone out of the pocket). We believe this approach to be applicable to a wide variety of text messages, such as SMS, email, or IM. In this paper, we first present a web-based platform that allows user-generated mappings for text sonification to be created and managed. An API enables any application to send a text message and receive the sonification in the form of a MIDI file. To showcase the potential, we implemented an Android app that sonifies incoming SMS. Second, we evaluate the feasibility of our approach and show that sonified messages are equally effective as ringtones when conveying meta-information.},
owner = {florianalt},
timestamp = {2013.10.04},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2013muc.pdf},
}
M. Pfeiffer, S. Schneegass, and F. Alt. Supporting Interaction in Public Space with Electrical Muscle Stimulation. In Adjunct Proceedings of the 2013 ACM International Joint Conference on Pervasive and Ubiquitous Computing (Ubicomp’13), ACM Press, 2013.
[BibTeX] [Abstract] [PDF]
As displays in public space are augmented with sensors, such as the Kinect, they enable passersby to interact with the content on the screen. As of today, feedback on the user action in such environments is usually limited to the visual channel. However, we believe that more immediate and intense forms, in particular haptic feedback, do not only increase the user experience, but may also have a strong impact on user attention and memorization of the content encountered during the interaction. Haptic feedback can today be achieved through vibration on the mobile phone, which is strongly dependent on the location of the device. We envision that fabrics, such as underwear, can in the future be equipped with electrical muscle stimulation, thus providing a more natural and direct way of haptic feedback. In this demo we aim to showcase the potential of applying electrical muscle stimulation as direct haptic feedback during interaction in public spaces in the context of a Kinect-based game for public displays.
@InProceedings{pfeiffer2013ubicompadj,
author = {Max Pfeiffer and Stefan Schneegass and Florian Alt},
booktitle = {{Adjunct Proceedings of the 2013 ACM International Joint Conference on Pervasive and Ubiquitous Computing}},
title = {{Supporting Interaction in Public Space with Electrical Muscle Stimulation}},
year = {2013},
publisher = {ACM Press},
series = {Ubicomp'13},
note = {pfeiffer2013ubicompadj},
abstract = {As displays in public space are augmented with sensors, such as the Kinect, they enable passersby to interact with the content on the screen. As of today, feedback on the user action in such environments is usually limited to the visual channel. However, we believe that more immediate and intense forms, in particular haptic feedback, do not only increase the user experience, but may also have a strong impact on user attention and memorization of the content encountered during the interaction. Haptic feedback can today be achieved through vibration on the mobile phone, which is strongly dependent on the location of the device. We envision that fabrics, such as underwear, can in the future be equipped with electrical muscle stimulation, thus providing a more natural and direct way of haptic feedback. In this demo we aim to showcase the potential of applying electrical muscle stimulation as direct haptic feedback during interaction in public spaces in the context of a Kinect-based game for public displays.},
owner = {florianalt},
timestamp = {2013.09.20},
url = {http://www.florian-alt.org/unibw/wp-content/publications/pfeiffer2013ubicompadj.pdf},
}
N. Memarovic, M. Langheinrich, K. Cheverst, N. Taylor, and F. Alt. P-layers – a layered framework addressing the multifaceted issues facing community-supporting public display deployments. Acm trans. comput.-hum. interact., vol. 20, iss. 3, p. 17:1–17:34, 2013. doi:10.1145/2491500.2491505
[BibTeX] [Abstract] [PDF]
The proliferation of digital signage systems has prompted a wealth of research that attempts to use public displays for more than just advertisement or transport schedules, such as their use for supporting communities. However, deploying and maintaining display systems “in the wild” that can support communities is challenging. Based on the authors’ experiences in designing and fielding a diverse range of community-supporting public display deployments, we identify a large set of challenges and issues that researchers working in this area are likely to encounter. Grouping them into five distinct layers – (1) hardware, (2) system architecture, (3) content, (4) system interaction, and (5) community interaction design – we draw up the P-LAYERS framework to enable a more systematic appreciation of the diverse range of issues associated with the development, the deployment, and the maintenance of such systems. Using three of our own deployments as illustrative examples, we will describe both our experiences within each individual layer, as well as point out interactions between the layers. We believe our framework provides a valuable aid for researchers looking to work in this space, alerting them to the issues they are likely to encounter during their deployments, and help them plan accordingly.
@article{memarovic2013tochi,
  author     = {Memarovic, Nemanja and Langheinrich, Marc and Cheverst, Keith and Taylor, Nick and Alt, Florian},
  title      = {P-LAYERS -- A Layered Framework Addressing the Multifaceted Issues Facing Community-Supporting Public Display Deployments},
  journal    = {ACM Trans. Comput.-Hum. Interact.},
  year       = {2013},
  month      = jul,
  volume     = {20},
  number     = {3},
  pages      = {17:1--17:34},
  articleno  = {17},
  issue_date = {July 2013},
  issn       = {1073-0516},
  doi        = {10.1145/2491500.2491505},
  acmid      = {2491505},
  publisher  = {ACM},
  address    = {New York, NY, USA},
  keywords   = {Community interaction, community needs, pervasive displays, public displays},
  numpages   = {34},
  abstract   = {The proliferation of digital signage systems has prompted a wealth of research that attempts to use public displays for more than just advertisement or transport schedules, such as their use for supporting communities. However, deploying and maintaining display systems “in the wild” that can support communities is challenging. Based on the authors’ experiences in designing and fielding a diverse range of community-supporting public display deployments, we identify a large set of challenges and issues that researchers working in this area are likely to encounter. Grouping them into five distinct layers -- (1) hardware, (2) system architecture, (3) content, (4) system interaction, and (5) community interaction design -- we draw up the P-LAYERS framework to enable a more systematic appreciation of the diverse range of issues associated with the development, the deployment, and the maintenance of such systems. Using three of our own deployments as illustrative examples, we will describe both our experiences within each individual layer, as well as point out interactions between the layers. We believe our framework provides a valuable aid for researchers looking to work in this space, alerting them to the issues they are likely to encounter during their deployments, and help them plan accordingly.},
  note       = {memarovic2013tochi},
  timestamp  = {2013.06.17},
  url        = {http://www.florian-alt.org/unibw/wp-content/publications/memarovic2013tochi.pdf},
}
F. Alt, A. S. Shirazi, T. Kubitza, and A. Schmidt. Interaction techniques for creating and exchanging content with public displays. In Proceedings of the sigchi conference on human factors in computing systems (CHI ’13), ACM, New York, NY, USA, 2013, p. 1709–1718. doi:10.1145/2470654.2466226
[BibTeX] [Abstract] [PDF]
Falling hardware prices and ever more displays being connected to the Internet will lead to large public display networks, potentially forming a novel communication medium. We envision that such networks are not restricted to display owners and advertisers anymore, but allow also passersby (e.g., customers) to exchange content, similar to traditional public notice areas, such as bulletin boards. In this context it is crucial to understand emerging practices and provide easy and straight forward interaction techniques to be used for creating and exchanging content. In this paper, we present Digifieds, a digital public notice area we built to investigate and compare possible interaction techniques. Based on a lab study we show that using direct touch at the display as well as using the mobile phone as a complementing interaction technology are most suitable. Direct touch at the display closely resembles the interaction known from classic bulletin boards and provides the highest usability. Mobile phones preserve the users’ privacy as they exchange (sensitive) data with the display and at the same time allow content to be created on-the-go or to be retrieved.
@InProceedings{alt2013chi,
author = {Alt, Florian and Shirazi, Alireza Sahami and Kubitza, Thomas and Schmidt, Albrecht},
title = {Interaction Techniques for Creating and Exchanging Content with Public Displays},
booktitle = {Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
year = {2013},
series = {CHI '13},
pages = {1709--1718},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2013chi},
abstract = {Falling hardware prices and ever more displays being connected to the Internet will lead to large public display networks, potentially forming a novel communication medium. We envision that such networks are not restricted to display owners and advertisers anymore, but allow also passersby (e.g., customers) to exchange content, similar to traditional public notice areas, such as bulletin boards. In this context it is crucial to understand emerging practices and provide easy and straight forward interaction techniques to be used for creating and exchanging content. In this paper, we present Digifieds, a digital public notice area we built to investigate and compare possible interaction techniques. Based on a lab study we show that using direct touch at the display as well as using the mobile phone as a complementing interaction technology are most suitable. Direct touch at the display closely resembles the interaction known from classic bulletin boards and provides the highest usability. Mobile phones preserve the users' privacy as they exchange (sensitive) data with the display and at the same time allow content to be created on-the-go or to be retrieved.},
acmid = {2466226},
doi = {10.1145/2470654.2466226},
isbn = {978-1-4503-1899-0},
keywords = {classified ads, digifieds, interaction, public displays},
location = {Paris, France},
numpages = {10},
owner = {florianalt},
timestamp = {2013.06.12},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2013chi.pdf},
}
R. José, J. Cardoso, F. Alt, S. Clinch, and N. Davies. Mobile applications for open display networks: common design considerations. In Proceedings of the 2nd acm international symposium on pervasive displays (PerDis ’13), ACM, New York, NY, USA, 2013, p. 97–102. doi:10.1145/2491568.2491590
[BibTeX] [Abstract] [PDF]
Mobile devices can be a powerful tool for interaction with public displays, but mobile applications supporting this form of interaction are not yet part of our everyday reality. There are no widely accepted abstractions, standards, or practices that may enable systematic interaction between mobile devices and public displays. We envision public displays to move away from a world of closed display networks to scenarios where mobile applications could allow people to interact with the myriad of displays they might encounter during their everyday trips. In this research, we study the key processes involved in this collaborative interaction between public shared displays and mobile applications. Based on the lessons learned from our own development and deployment of 3 applications, and also on the analysis of the interactive features described in the literature, we have identified 8 key processes that may shape this form of interaction: Discovery, Association, Presence Management, Exploration, Interface Migration, Controller, Media Upload and Media Download. The contribution of this work is the identification of these high-level processes and an elicitation of the main design considerations for display networks.
@InProceedings{jose2013perdis,
author = {Jos{\'e}, Rui and Cardoso, Jorge and Alt, Florian and Clinch, Sarah and Davies, Nigel},
title = {Mobile Applications for Open Display Networks: Common Design Considerations},
booktitle = {Proceedings of the 2nd ACM International Symposium on Pervasive Displays},
year = {2013},
series = {PerDis '13},
pages = {97--102},
address = {New York, NY, USA},
publisher = {ACM},
note = {jose2013perdis},
abstract = {Mobile devices can be a powerful tool for interaction with public displays, but mobile applications supporting this form of interaction are not yet part of our everyday reality. There are no widely accepted abstractions, standards, or practices that may enable systematic interaction between mobile devices and public displays. We envision public displays to move away from a world of closed display networks to scenarios where mobile applications could allow people to interact with the myriad of displays they might encounter during their everyday trips. In this research, we study the key processes involved in this collaborative interaction between public shared displays and mobile applications. Based on the lessons learned from our own development and deployment of 3 applications, and also on the analysis of the interactive features described in the literature, we have identified 8 key processes that may shape this form of interaction: Discovery, Association, Presence Management, Exploration, Interface Migration, Controller, Media Upload and Media Download. The contribution of this work is the identification of these high-level processes and an elicitation of the main design considerations for display networks.},
acmid = {2491590},
doi = {10.1145/2491568.2491590},
isbn = {978-1-4503-2096-2},
keywords = {mobile applications, open display networks, public displays},
location = {Mountain View, California},
numpages = {6},
owner = {florianalt},
timestamp = {2013.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/jose2013perdis.pdf},
}
N. Broy, F. Alt, S. Schneegass, N. Henze, and A. Schmidt. Perceiving layered information on 3d displays using binocular disparity. In Proceedings of the 2nd acm international symposium on pervasive displays (PerDis ’13), ACM, New York, NY, USA, 2013, p. 61–66. doi:10.1145/2491568.2491582
[BibTeX] [Abstract] [PDF]
3D displays are hitting the mass market. They are integrated in consumer TVs, notebooks, and mobile phones and are mainly used for virtual reality as well as video content. We see large potential in using depth also for structuring information. Our specific use case is 3D displays integrated in cars. The capabilities of such displays could be used to present relevant information to the driver in a fast and easy-to-understand way, e.g., by functionality-based clustering. However, excessive parallaxes can cause discomfort and in turn negatively influence the primary driving task. This requires a reasonable choice of parallax boundaries. The contribution of this paper is twofold. First, we identify the comfort zone when perceiving 3D content. Second, we determine a minimum depth distance between objects that still enables users to quickly and accurately separate the two depth planes. The results yield that in terms of task completion time the optimum distance from screen level is up to 35.9 arc-min angular disparity behind the screen plane. A distance of at least 2.7 arc-min difference in angular disparity between the objects significantly decreases time for layer separation. Based on the results we derive design implications.
@InProceedings{broy2013perdis,
author = {Broy, Nora and Alt, Florian and Schneegass, Stefan and Henze, Niels and Schmidt, Albrecht},
title = {Perceiving Layered Information on {3D} Displays Using Binocular Disparity},
booktitle = {Proceedings of the 2nd ACM International Symposium on Pervasive Displays},
year = {2013},
series = {PerDis '13},
pages = {61--66},
address = {New York, NY, USA},
publisher = {ACM},
note = {broy2013perdis},
abstract = {3D displays are hitting the mass market. They are integrated in consumer TVs, notebooks, and mobile phones and are mainly used for virtual reality as well as video content. We see large potential in using depth also for structuring information. Our specific use case is 3D displays integrated in cars. The capabilities of such displays could be used to present relevant information to the driver in a fast and easy-to-understand way, e.g., by functionality-based clustering. However, excessive parallaxes can cause discomfort and in turn negatively influence the primary driving task. This requires a reasonable choice of parallax boundaries. The contribution of this paper is twofold. First, we identify the comfort zone when perceiving 3D content. Second, we determine a minimum depth distance between objects that still enables users to quickly and accurately separate the two depth planes. The results yield that in terms of task completion time the optimum distance from screen level is up to 35.9 arc-min angular disparity behind the screen plane. A distance of at least 2.7 arc-min difference in angular disparity between the objects significantly decreases time for layer separation. Based on the results we derive design implications.},
acmid = {2491582},
doi = {10.1145/2491568.2491582},
isbn = {978-1-4503-2096-2},
keywords = {3D displays, automotive user interfaces, human factors},
location = {Mountain View, California},
numpages = {6},
owner = {florianalt},
timestamp = {2013.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2013perdis.pdf},
}
F. Alt, S. Schneegass, M. Girgis, and A. Schmidt. Cognitive effects of interactive public display applications. In Proceedings of the 2nd acm international symposium on pervasive displays (PerDis ’13), ACM, New York, NY, USA, 2013, p. 13–18. doi:10.1145/2491568.2491572
[BibTeX] [Abstract] [PDF]
Many public displays are nowadays equipped with different types of sensors. Such displays allow engaging and persistent user experiences to be created, e.g., in the form of gesture-controlled games or content exploration using direct touch at the display. However, as digital displays replace traditional posters and billboards, display owners are reluctant to deploy interactive content, but rather adapt traditional, non-interactive content. The main reason is, that the benefit of such interactive deployments is not obvious. Our hypothesis is that interactivity has a cognitive effect on users and therefore increases the ability to remember what they have seen on the screen – which is beneficial both for the display owner and the user. In this paper we systematically investigate the impact of interactive content on public displays on the users’ cognition in different situations. Our findings indicate that overall memorability is positively affected as users interact. Based on these findings we discuss design implications for interactive public displays.
@InProceedings{alt2013perdis,
author = {Alt, Florian and Schneegass, Stefan and Girgis, Michael and Schmidt, Albrecht},
title = {Cognitive Effects of Interactive Public Display Applications},
booktitle = {Proceedings of the 2nd ACM International Symposium on Pervasive Displays},
year = {2013},
series = {PerDis '13},
pages = {13--18},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2013perdis},
abstract = {Many public displays are nowadays equipped with different types of sensors. Such displays allow engaging and persistent user experiences to be created, e.g., in the form of gesture-controlled games or content exploration using direct touch at the display. However, as digital displays replace traditional posters and billboards, display owners are reluctant to deploy interactive content, but rather adapt traditional, non-interactive content. The main reason is, that the benefit of such interactive deployments is not obvious. Our hypothesis is that interactivity has a cognitive effect on users and therefore increases the ability to remember what they have seen on the screen -- which is beneficial both for the display owner and the user. In this paper we systematically investigate the impact of interactive content on public displays on the users' cognition in different situations. Our findings indicate that overall memorability is positively affected as users interact. Based on these findings we discuss design implications for interactive public displays.},
acmid = {2491572},
doi = {10.1145/2491568.2491572},
isbn = {978-1-4503-2096-2},
keywords = {digital signage, interactivity, public display, recall, recognition},
location = {Mountain View, California},
numpages = {6},
owner = {florianalt},
timestamp = {2013.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2013perdis.pdf},
}
N. Memarovic, K. Cheverst, M. Langheinrich, I. Elhart, and F. Alt. Tethered or free to roam: the design space of limiting content access on community displays. In Proceedings of the 2nd acm international symposium on pervasive displays (PerDis ’13), ACM, New York, NY, USA, 2013, p. 127–132. doi:10.1145/2491568.2491596
[BibTeX] [Abstract] [PDF]
Many design decisions need to be made when creating situated public displays that aim to serve a community. One such decision concerns access to its contents: should users be able to access content remotely, e.g., via a web page, or should this be limited to users who are co-located with the display? A similar decision has to be made for community content upload: do posters need to be co-located with the display or can posts be made from any location? In other words, content display and creation can be ‘tethered’ to a display or it can be ‘free to roam’, i.e., accessible from anywhere. In this paper we analyze prior community display deployments in an attempt to explore this space and produce a taxonomy that highlights the inherent design choices. Furthermore, we discuss some of the reasons that may underlie these choices and identify opportunities for design.
@InProceedings{memarovic2013perdis,
author = {Memarovic, Nemanja and Cheverst, Keith and Langheinrich, Marc and Elhart, Ivan and Alt, Florian},
title = {Tethered or Free to Roam: The Design Space of Limiting Content Access on Community Displays},
booktitle = {Proceedings of the 2nd ACM International Symposium on Pervasive Displays},
year = {2013},
series = {PerDis '13},
pages = {127--132},
address = {New York, NY, USA},
publisher = {ACM},
note = {memarovic2013perdis},
abstract = {Many design decisions need to be made when creating situated public displays that aim to serve a community. One such decision concerns access to its contents: should users be able to access content remotely, e.g., via a web page, or should this be limited to users who are co-located with the display? A similar decision has to be made for community content upload: do posters need to be co-located with the display or can posts be made from any location? In other words, content display and creation can be 'tethered' to a display or it can be 'free to roam', i.e., accessible from anywhere. In this paper we analyze prior community display deployments in an attempt to explore this space and produce a taxonomy that highlights the inherent design choices. Furthermore, we discuss some of the reasons that may underlie these choices and identify opportunities for design.},
acmid = {2491596},
doi = {10.1145/2491568.2491596},
isbn = {978-1-4503-2096-2},
keywords = {collocation, communities, content, public displays},
location = {Mountain View, California},
numpages = {6},
owner = {florianalt},
timestamp = {2013.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/memarovic2013perdis.pdf},
}
F. Alt and S. Schneegass. Towards understanding the cognitive effects of interactivity. In Proceedings of the 1st workshop on experiencing interactivity in public space (eips) (EIPS’13), 2013.
[BibTeX] [Abstract] [PDF]
Cheap and easy-to-deploy consumer hardware, such as the Microsoft Kinect, touch screens, and smartphones drive an increasing proliferation of public space with interactive applications. Such applications include artistic, playful, and informative content on public displays. Though such applications are in general positively perceived by users, their benefit is in many cases not clear. In this paper we argue that while most current (advertising) content on public displays aims at stimulating user action (e.g., making a purchase), interactive applications are also suitable to support cognition. In our work, we focus on awareness as one particular form of cognition and assess it by measuring recall and recognition. This is not only interesting for advertising but for any type of applications that requires the user to remember information. We contribute a design space and map out directions for future research.
@InProceedings{alt2013eips,
author = {Florian Alt AND Stefan Schneegass},
booktitle = {Proceedings of the 1st Workshop on Experiencing Interactivity in Public Space (EIPS)},
title = {Towards Understanding the Cognitive Effects of Interactivity},
year = {2013},
note = {alt2013eips},
series = {EIPS'13},
abstract = {Cheap and easy-to-deploy consumer hardware, such as the Microsoft Kinect, touch screens, and smartphones drive an increasing proliferation of public space with interactive applications. Such applications include artistic, playful, and informative content on public displays. Though such applications are in general positively perceived by users, their benefit is in many cases not clear. In this paper we argue that while most current (advertising) content on public displays aims at stimulating user action (e.g., making a purchase), interactive applications are also suitable to support cognition. In our work, we focus on awareness as one particular form of cognition and assess it by measuring recall and recognition. This is not only interesting for advertising but for any type of applications that requires the user to remember information. We contribute a design space and map out directions for future research.},
owner = {florianalt},
timestamp = {2013.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2013eips.pdf},
}

### 2012

N. Memarovic, M. Langheinrich, F. Alt, I. Elhart, S. Hosio, and E. Rubegni. Using public displays to stimulate passive engagement, active engagement, and discovery in public spaces. In Proceedings of the 4th media architecture biennale conference: participation (MAB ’12), ACM, New York, NY, USA, 2012, p. 55–64. doi:10.1145/2421076.2421086
[BibTeX] [Abstract] [PDF]
In their influential book “Public space” Carr et al. describe essential human needs that public spaces fulfill: (1) passive engagement with the environment, where we observe what others are doing; (2) active engagement through intellectual challenges posed by the space, or through engagement with the people in it; and (3) excitement of novel discoveries within the space. An often underused resource in public spaces – public displays – can be used to stimulate these needs. In this paper we argue for a new research direction that explores how public displays can stimulate such essential needs in public spaces. We describe and conceptualize related processes that occur around public displays, based on in-depth observations of people interacting with a publicly fielded display application in a city center. Our conceptualization is meant to lay the foundations for designing engaging public display systems that stimulate PACD, and for supporting the analysis of existing deployments.
@InProceedings{memarovic2012mab,
author = {Memarovic, Nemanja and Langheinrich, Marc and Alt, Florian and Elhart, Ivan and Hosio, Simo and Rubegni, Elisa},
booktitle = {Proceedings of the 4th Media Architecture Biennale Conference: Participation},
title = {Using Public Displays to Stimulate Passive Engagement, Active Engagement, and Discovery in Public Spaces},
year = {2012},
note = {memarovic2012mab},
abstract = {In their influential book "Public space" Carr et al. describe essential human needs that public spaces fulfill: (1) passive engagement with the environment, where we observe what others are doing; (2) active engagement through intellectual challenges posed by the space, or through engagement with the people in it; and (3) excitement of novel discoveries within the space. An often underused resource in public spaces -- public displays -- can be used to stimulate these needs. In this paper we argue for a new research direction that explores how public displays can stimulate such essential needs in public spaces. We describe and conceptualize related processes that occur around public displays, based on in-depth observations of people interacting with a publicly fielded display application in a city center. Our conceptualization is meant to lay the foundations for designing engaging public display systems that stimulate PACD, and for supporting the analysis of existing deployments.},
acmid = {2421086},
address = {New York, NY, USA},
doi = {10.1145/2421076.2421086},
isbn = {978-1-4503-1792-4},
keywords = {community interaction, identity cognition, public displays, public space, urban computing, urban informatics},
location = {Aarhus, Denmark},
numpages = {10},
pages = {55--64},
publisher = {ACM},
series = {MAB '12},
timestamp = {2012.11.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/memarovic2012mab.pdf},
}
F. Alt, A. S. Shirazi, A. Schmidt, and J. Mennenöh. Increasing the user’s attention on the web: using implicit interaction based on gaze behavior to tailor content. In Proceedings of the 7th nordic conference on human-computer interaction: making sense through design (NordiCHI ’12), ACM, New York, NY, USA, 2012, p. 544–553. doi:10.1145/2399016.2399099
[BibTeX] [Abstract] [PDF]
The World Wide Web has evolved into a widely used interactive application platform, providing information, products, and services. With eye trackers we envision that gaze information as an additional input channel can be used in the future to adapt and tailor web content (e.g., news, information, ads) towards the users’ attention as they implicitly interact with web pages. We present a novel approach, which allows web content to be customized on-the-fly based on the the user’s gaze behavior (dwell time, duration of fixations, and number of fixations). Our system analyzes the gaze path on a page and uses this information to create adaptive content on subsequent pages. As a proof-of-concept we report on a case study with 12 participants. We presented them both randomly chosen content (baseline) as well as content chosen based on their gaze-behavior. We found a significant increase of attention towards the adapted content and evidence for changes in the user attitude based on the Elaboration Likelihood Model.
@InProceedings{alt2012nordichi,
author = {Alt, Florian and Shirazi, Alireza Sahami and Schmidt, Albrecht and Mennen{\"o}h, Julian},
title = {Increasing the User's Attention on the {Web}: Using Implicit Interaction Based on Gaze Behavior to Tailor Content},
booktitle = {Proceedings of the 7th Nordic Conference on Human-Computer Interaction: Making Sense Through Design},
year = {2012},
series = {NordiCHI '12},
pages = {544--553},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2012nordichi},
abstract = {The World Wide Web has evolved into a widely used interactive application platform, providing information, products, and services. With eye trackers we envision that gaze information as an additional input channel can be used in the future to adapt and tailor web content (e.g., news, information, ads) towards the users' attention as they implicitly interact with web pages. We present a novel approach, which allows web content to be customized on-the-fly based on the the user's gaze behavior (dwell time, duration of fixations, and number of fixations). Our system analyzes the gaze path on a page and uses this information to create adaptive content on subsequent pages. As a proof-of-concept we report on a case study with 12 participants. We presented them both randomly chosen content (baseline) as well as content chosen based on their gaze-behavior. We found a significant increase of attention towards the adapted content and evidence for changes in the user attitude based on the Elaboration Likelihood Model.},
acmid = {2399099},
doi = {10.1145/2399016.2399099},
isbn = {978-1-4503-1482-4},
keywords = {adaptative content, eye tracking, implicit interaction},
location = {Copenhagen, Denmark},
numpages = {10},
timestamp = {2012.10.19},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012nordichi.pdf},
}
F. Alt, A. Sahami Shirazi, A. Schmidt, and R. Atterer. Bridging waiting times on web pages. In Proceedings of the 14th international conference on human-computer interaction with mobile devices and services (MobileHCI ’12), ACM, New York, NY, USA, 2012, p. 305–308. doi:10.1145/2371574.2371619
[BibTeX] [Abstract] [PDF]
High-speed Internet connectivity makes browsing a convenient task. However, there are many situations in which surfing the web is still slow due to limited bandwidth, slow servers, or complex queries. As a result, loading web pages can take several seconds, making (mobile) browsing cumbersome. We present an approach which makes use of the time spent on waiting for the next page, by bridging the wait with extra cached or preloaded content. We show how the content (e.g., news, Twitter) can be adapted to the user’s interests and to the context of use, hence making mobile surfing more comfortable. We compare two approaches: in time-multiplex mode, the entire screen displays bridging content until the loading is finished. In space-multiplex mode, content is displayed alongside the requested content while it loads. We use an HTTP proxy to intercept requests and add JavaScript code, which allows the bridging content from websites of our choice to be inserted. The approach was evaluated with 15 participants, assessing suitable content and usability.
@InProceedings{alt2012mobilehci,
author = {Alt, Florian and Sahami Shirazi, Alireza and Schmidt, Albrecht and Atterer, Richard},
title = {Bridging Waiting Times on {Web} Pages},
booktitle = {Proceedings of the 14th International Conference on Human-Computer Interaction with Mobile Devices and Services},
year = {2012},
series = {MobileHCI '12},
pages = {305--308},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2012mobilehci},
abstract = {High-speed Internet connectivity makes browsing a convenient task. However, there are many situations in which surfing the web is still slow due to limited bandwidth, slow servers, or complex queries. As a result, loading web pages can take several seconds, making (mobile) browsing cumbersome. We present an approach which makes use of the time spent on waiting for the next page, by bridging the wait with extra cached or preloaded content. We show how the content (e.g., news, Twitter) can be adapted to the user's interests and to the context of use, hence making mobile surfing more comfortable. We compare two approaches: in time-multiplex mode, the entire screen displays bridging content until the loading is finished. In space-multiplex mode, content is displayed alongside the requested content while it loads. We use an HTTP proxy to intercept requests and add JavaScript code, which allows the bridging content from websites of our choice to be inserted. The approach was evaluated with 15 participants, assessing suitable content and usability.},
acmid = {2371619},
doi = {10.1145/2371574.2371619},
isbn = {978-1-4503-1105-2},
keywords = {mobile device, waiting time, www},
location = {San Francisco, California, USA},
numpages = {4},
timestamp = {2012.10.18},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012mobilehci.pdf},
}
B. Pfleging, F. Alt, and A. Schmidt. Meaningful melodies – personal sonification of text messages for mobile devices. In Adjunct proceedings of the 14th acm sigchi’s international conference on human-computer interaction with mobile devices and services (MobileHCI’12), San Francisco, CA, US, 2012.
[BibTeX] [Abstract] [PDF]
Mobile phones offer great potential for personalization. Besides apps and background images, ringtones are the major form of personalization. They are most often used to have a personal sound for incoming texts and calls. Furthermore, ringtones are used to identify the caller or sender of a message. In parts, this function is utilitarian (e.g., caller identification without looking at the phone) but it is also a form of self-expression (e.g., favorite tune as standard ringtone). We investigate how audio can be used to convey richer information. In this demo we show how sonifications of SMS can be used to encode information about the sender’s identity as well as the content and intention of a message based on flexible, user-generated mappings. We present a platform that allows arbitrary mappings to be managed and apps to be connected in order to create a sonification of any message. Using a background app on Android, we show the utility of the approach for mobile devices.
@InProceedings{pfleging2012mobilehciadj,
author = {Bastian Pfleging and Florian Alt and Albrecht Schmidt},
booktitle = {Adjunct Proceedings of the 14th ACM SIGCHI's International Conference on Human-Computer Interaction with Mobile Devices and Services},
title = {Meaningful Melodies - Personal Sonification of Text Messages for Mobile Devices},
year = {2012},
address = {San Francisco, CA, US},
month = sep,
note = {pfleging2012mobilehciadj},
series = {MobileHCI'12},
abstract = {Mobile phones offer great potential for personalization. Besides apps and background images, ringtones are the major form of personalization. They are most often used to have a personal sound for incoming texts and calls. Furthermore, ringtones are used to identify the caller or sender of a message. In parts, this function is utilitarian (e.g., caller identification without looking at the phone) but it is also a form of self-expression (e.g., favorite tune as standard ringtone). We investigate how audio can be used to convey richer information. In this demo we show how sonifications of SMS can be used to encode information about the sender's identity as well as the content and intention of a message based on flexible, user-generated mappings. We present a platform that allows arbitrary mappings to be managed and apps to be connected in order to create a sonification of any message. Using a background app on Android, we show the utility of the approach for mobile devices.},
owner = {flo},
timestamp = {2012.09.01},
}
S. Schneegass, F. Alt, and A. Schmidt. Mobile interaction with ads on public display networks. In Proceedings of the 10th international conference on mobile systems, applications, and services (MobiSys’12), 2012, p. 479–480.
[BibTeX] [Abstract] [PDF]
In public places we can observe that many conventional displays are replaced by digital displays, a lot of them networked. These displays mainly show advertising in a similar way to television's commercial break, not exploiting the opportunities of the new medium [1]. Several approaches of interaction between mobile devices and public displays have been investigated over the last 15 years. In this demo we concentrate on challenges that are specific to public displays used for advertising. In particular we focus on how new approaches for interaction with content, means for content creation, and tools for follow-ups can be implemented based on mobile devices. With Digifieds we present a research system that has been used to explore different research questions and to showcase the potential of interactive advertising in public space.
@InProceedings{schneegass2012mobisysadj,
author = {Schneegass, Stefan and Alt, Florian and Schmidt, Albrecht},
booktitle = {Proceedings of the 10th International Conference on Mobile Systems, Applications, and Services},
title = {Mobile Interaction with Ads on Public Display Networks},
year = {2012},
organization = {ACM},
pages = {479--480},
series = {MobiSys'12},
note = {schneegass2012mobisysadj},
abstract = {In public places we can observe that many conventional displays are replaced by digital displays, a lot of them networked. These displays mainly show advertising in a similar way to television's commercial break, not exploiting the opportunities of the new medium [1]. Several approaches of interaction between mobile devices and public displays have been investigated over the last 15 years. In this demo we concentrate on challenges that are specific to public displays used for advertising. In particular we focus on how new approaches for interaction with content, means for content creation, and tools for follow-ups can be implemented based on mobile devices. With Digifieds we present a research system that has been used to explore different research questions and to showcase the potential of interactive advertising in public space.},
timestamp = {2012.06.25},
}
F. Alt and S. Schneegass. A conceptual architecture for pervasive advertising in public display networks. In Proceedings of the 3rd workshop on infrastructure and design challenges of coupled display visual interfaces (PPD’12), 2012.
[BibTeX] [Abstract] [PDF]
This paper presents a conceptual architecture for pervasive advertising on public displays. It can help researchers and practitioners to inform the design of future display networks. Due to falling hardware prices we see a strong proliferation of (public) places with displays and it is not only large outdoor advertisers anymore operating them. However, public displays currently fail to attract the attention of the user – a challenge that could be overcome by networking displays and deploying sensors that allow novel interaction techniques and engaging user experiences to be created. One major question is how to design an appropriate infrastructure that caters to the conflicting needs of the involved stakeholders. Users want interesting content and their privacy being respected, advertisers want to gather the user’s data, and display owners want to be in control of the content as they fund the infrastructure. We identify the core components and discuss how control can be appropriately distributed among stakeholders by presenting three different forms of the architecture (user-centered, advertiser-centered, trusted).
@InProceedings{alt2012ppd,
author = {Florian Alt and Stefan Schneegass},
title = {A Conceptual Architecture for Pervasive Advertising in Public Display Networks},
booktitle = {Proceedings of the 3rd Workshop on Infrastructure and Design Challenges of Coupled Display Visual Interfaces},
year = {2012},
series = {PPD'12},
month = jun,
note = {alt2012ppd},
abstract = {This paper presents a conceptual architecture for pervasive advertising on public displays. It can help researchers and practitioners to inform the design of future display networks. Due to falling hardware prices we see a strong proliferation of (public) places with displays and it is not only large outdoor advertisers anymore operating them. However, public displays currently fail to attract the attention of the user -- a challenge that could be overcome by networking displays and deploying sensors that allow novel interaction techniques and engaging user experiences to be created. One major question is how to design an appropriate infrastructure that caters to the conflicting needs of the involved stakeholders. Users want interesting content and their privacy being respected, advertisers want to gather the user's data, and display owners want to be in control of the content as they fund the infrastructure. We identify the core components and discuss how control can be appropriately distributed among stakeholders by presenting three different forms of the architecture (user-centered, advertiser-centered, trusted).},
timestamp = {2012.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012ppd.pdf},
}
N. Memarovic, M. Langheinrich, and F. Alt. The Interacting Places Framework: Conceptualizing Public Display Applications that Promote Community Interaction and Place Awareness. In Proceedings of the 2012 international symposium on pervasive displays (PerDis’12), ACM, New York, NY, USA, 2012, p. 71–76. doi:10.1145/2307798.2307805
[BibTeX] [Abstract] [PDF]
The proliferation of public displays, along with ubiquitous wireless communication and sensing technology, has made it possible to create a novel public communication medium: open networked pervasive displays would allow citizens to provide their own content, appropriate close-by displays, and increase their own awareness of a display’s surroundings and its local communities. We envision that such displays can create interacting places, i. e., public spaces that promote community interaction and place awareness. In this paper we describe our Interacting Places Framework (IPF), a conceptual framework for designing applications in this novel research space that we developed based on four distinct public display studies. Our IPF focuses on 4 elements: 1) content providers, i. e., entities that will supply content; 2) content viewers, i. e., people who are addressed by the content; 3) communication channels that deliver the content and range from inclusive, i. e., open-for-everyone, to exclusive, i. e., closed-group channels; and 4) an awareness diffusion layer that describes how community awareness building happens both explicitly, i. e., through content tailored towards a specific audience, and implicitly, by observing output for other people.
@InProceedings{memarovic2012perdis,
author = {Memarovic, Nemanja and Langheinrich, Marc and Alt, Florian},
title = {{The Interacting Places Framework: Conceptualizing Public Display Applications that Promote Community Interaction and Place Awareness}},
booktitle = {Proceedings of the 2012 International Symposium on Pervasive Displays},
year = {2012},
series = {PerDis'12},
pages = {71--76},
address = {New York, NY, USA},
month = jun,
publisher = {ACM},
note = {memarovic2012perdis},
abstract = {The proliferation of public displays, along with ubiquitous wireless communication and sensing technology, has made it possible to create a novel public communication medium: open networked pervasive displays would allow citizens to provide their own content, appropriate close-by displays, and increase their own awareness of a display's surroundings and its local communities. We envision that such displays can create interacting places, i. e., public spaces that promote community interaction and place awareness. In this paper we describe our Interacting Places Framework (IPF), a conceptual framework for designing applications in this novel research space that we developed based on four distinct public display studies. Our IPF focuses on 4 elements: 1) content providers, i. e., entities that will supply content; 2) content viewers, i. e., people who are addressed by the content; 3) communication channels that deliver the content and range from inclusive, i. e., open-for-everyone, to exclusive, i. e., closed-group channels; and 4) an awareness diffusion layer that describes how community awareness building happens both explicitly, i. e., through content tailored towards a specific audience, and implicitly, by observing output for other people.},
acmid = {2307805},
articleno = {7},
doi = {10.1145/2307798.2307805},
isbn = {978-1-4503-1414-5},
keywords = {community interaction, interacting places, public displays, urban Computing, urban informatics},
location = {Porto, Portugal},
numpages = {6},
timestamp = {2012.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/memarovic2012perdis.pdf},
}
F. Alt, S. Schneegass, A. Schmidt, J. Müller, and N. Memarovic. How to Evaluate Public Displays. In Proceedings of the 2012 international symposium on pervasive displays (PerDis’12), ACM, New York, NY, USA, 2012, p. 171–176. doi:10.1145/2307798.2307815
[BibTeX] [Abstract] [PDF]
After years in the lab, interactive public displays are finding their way into public spaces, shop windows, and public institutions. They are equipped with a multitude of sensors as well as (multi-) touch surfaces allowing not only the audience to be sensed, but also their effectiveness to be measured. The lack of generally accepted design guidelines for public displays and the fact that there are many different objectives (e.g., increasing attention, optimizing interaction times, finding the best interaction technique) make it a challenging task to pick the most suitable evaluation method. Based on a literature survey and our own experiences, this paper provides an overview of study types, paradigms, and methods for evaluation both in the lab and in the real world. Following a discussion of design challenges, we provide a set of guidelines for researchers and practitioners alike to be applied when evaluating public displays.
@InProceedings{alt2012perdis,
author = {Alt, Florian and Schneegass, Stefan and Schmidt, Albrecht and M\"{u}ller, J\"{o}rg and Memarovic, Nemanja},
booktitle = {Proceedings of the 2012 International Symposium on Pervasive Displays},
title = {{How to Evaluate Public Displays}},
year = {2012},
address = {New York, NY, USA},
month = jun,
note = {alt2012perdis},
pages = {171--176},
publisher = {ACM},
series = {PerDis'12},
abstract = {After years in the lab, interactive public displays are finding their way into public spaces, shop windows, and public institutions. They are equipped with a multitude of sensors as well as (multi-) touch surfaces allowing not only the audience to be sensed, but also their effectiveness to be measured. The lack of generally accepted design guidelines for public displays and the fact that there are many different objectives (e.g., increasing attention, optimizing interaction times, finding the best interaction technique) make it a challenging task to pick the most suitable evaluation method. Based on a literature survey and our own experiences, this paper provides an overview of study types, paradigms, and methods for evaluation both in the lab and in the real world. Following a discussion of design challenges, we provide a set of guidelines for researchers and practitioners alike to be applied when evaluating public displays.},
acmid = {2307815},
articleno = {17},
doi = {10.1145/2307798.2307815},
isbn = {978-1-4503-1414-5},
keywords = {digital signage, evaluation, methods, public displays},
location = {Porto, Portugal},
numpages = {6},
timestamp = {2012.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012perdis.pdf},
}
F. Alt, D. Michelis, and J. Müller, “Pervasive advertising technologies,” in Media cultures, S. Pop, U. Stalder, G. Tscherteu, and M. Struppek, Eds., Av Edition, 2012, p. 121–128.
[BibTeX] [PDF]
@InBook{alt2012mediacultures2e,
chapter = {Pervasive Advertising Technologies (english)},
pages = {121--128},
title = {Pervasive Advertising Technologies},
publisher = {Av Edition},
year = {2012},
author = {Florian Alt and Daniel Michelis and J\"{o}rg M\"{u}ller},
editor = {Susa Pop and Ursula Stalder and Gernot Tscherteu and Mirjam Struppek},
month = may,
note = {alt2012mediacultures2e},
booktitle = {Media Cultures},
owner = {flo},
timestamp = {2012.05.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012mediacultures2e.pdf},
}
F. Alt, D. Michelis, and J. Müller, “Pervasive advertising – technologien, konzepte, herausforderungen,” in Media cultures, S. Pop, U. Stalder, G. Tscherteu, and M. Struppek, Eds., Av Edition, 2012, p. 331–338.
[BibTeX] [PDF]
@InBook{alt2012mediacultures2d,
chapter = {Pervasive Advertising -- Technologien, Konzepte, Herausforderungen (german)},
pages = {331--338},
title = {Pervasive Advertising -- Technologien, Konzepte, Herausforderungen},
publisher = {Av Edition},
year = {2012},
author = {Florian Alt and Daniel Michelis and J\"{o}rg M\"{u}ller},
editor = {Susa Pop and Ursula Stalder and Gernot Tscherteu and Mirjam Struppek},
month = may,
note = {alt2012mediacultures2d},
booktitle = {Media Cultures},
owner = {flo},
timestamp = {2012.05.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012mediacultures2d.pdf},
}
F. Alt, “Digital black boards,” in Media cultures, S. Pop, U. Stalder, G. Tscherteu, and M. Struppek, Eds., Av Edition, 2012, p. 117–120.
[BibTeX] [PDF]
@InBook{alt2012mediacultures1e,
chapter = {Digital Black Boards (english)},
pages = {117--120},
title = {Digital Black Boards},
publisher = {Av Edition},
year = {2012},
author = {Florian Alt},
editor = {Susa Pop and Ursula Stalder and Gernot Tscherteu and Mirjam Struppek},
month = may,
note = {alt2012mediacultures1e},
booktitle = {Media Cultures},
owner = {flo},
timestamp = {2012.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012mediacultures1e.pdf},
}
J. Müller, R. Walter, G. Bailly, M. Nischt, and F. Alt. Looking Glass: A Field Study on Noticing Interactivity of a Shop Window (Video). In Adjunct proceedings of the 2012 acm conference on human factors in computing systems (CHI’12), ACM, New York, NY, USA, 2012, p. 297–306. doi:10.1145/2207676.2207718
[BibTeX] [PDF]
@InProceedings{mueller2012chivideo,
author = {M\"{u}ller, J\"{o}rg and Walter, Robert and Bailly, Gilles and Nischt, Michael and Alt, Florian},
title = {{Looking Glass: A Field Study on Noticing Interactivity of a Shop Window (Video)}},
booktitle = {Adjunct Proceedings of the 2012 ACM Conference on Human Factors in Computing Systems},
year = {2012},
series = {CHI'12},
pages = {297--306},
address = {New York, NY, USA},
month = apr,
publisher = {ACM},
note = {mueller2012chivideo},
acmid = {2207718},
doi = {10.1145/2207676.2207718},
isbn = {978-1-4503-1015-4},
keywords = {interactivity, noticing interactivity, public displays, User representation},
location = {Austin, Texas, USA},
numpages = {10},
timestamp = {2012.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mueller2012chi.pdf},
}
F. Alt, J. Müller, and A. Schmidt. Advertising on Public Display Networks. IEEE Computer, vol. 45, iss. 5, pp. 50-56, 2012.
[BibTeX] [Abstract] [PDF]
For advertising-based public display networks to become truly pervasive, they must provide a tangible social benefit and be engaging without being obtrusive, blending advertisements with informative content.
@Article{alt2012computer,
author = {Florian Alt and J{\"o}rg M{\"u}ller and Albrecht Schmidt},
title = {{Advertising on Public Display Networks}},
journal = {IEEE Computer},
year = {2012},
volume = {45},
number = {5},
pages = {50--56},
month = may,
note = {alt2012computer},
abstract = {For advertising-based public display networks to become truly pervasive, they must provide a tangible social benefit and be engaging without being obtrusive, blending advertisements with informative content.},
bibsource = {DBLP, http://dblp.uni-trier.de},
doi = {10.1109/MC.2012.150},
ee = {http://doi.ieeecomputersociety.org/10.1109/MC.2012.150},
timestamp = {2012.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012computer.pdf},
}
F. Alt, “Digitale Schwarze Bretter,” in Media cultures, S. Pop, U. Stalder, G. Tscherteu, and M. Struppek, Eds., Av Edition, 2012, p. 317–321.
[BibTeX] [PDF]
@InBook{alt2012mediacultures1d,
chapter = {Digitale Schwarze Bretter (german)},
pages = {317--321},
title = {{Digitale Schwarze Bretter}},
publisher = {Av Edition},
year = {2012},
author = {Florian Alt},
editor = {Susa Pop and Ursula Stalder and Gernot Tscherteu and Mirjam Struppek},
month = may,
note = {alt2012mediacultures1d},
booktitle = {Media Cultures},
owner = {flo},
timestamp = {2012.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012mediacultures1d.pdf},
}
A. Bulling, F. Alt, and A. Schmidt. Increasing The Security Of Gaze-Based Cued-Recall Graphical Passwords Using Saliency Masks. In Proceedings of the 2012 acm annual conference on human factors in computing systems (CHI’12), ACM, New York, NY, USA, 2012, p. 3011–3020. doi:10.1145/2207676.2208712
[BibTeX] [Abstract] [PDF]
With computers being used ever more ubiquitously in situations where privacy is important, secure user authentication is a central requirement. Gaze-based graphical passwords are a particularly promising means for shoulder-surfing-resistant authentication, but selecting secure passwords remains challenging. In this paper, we present a novel gaze-based authentication scheme that makes use of cued-recall graphical passwords on a single image. In order to increase password security, our approach uses a computational model of visual attention to mask those areas of the image that are most likely to attract visual attention. We create a realistic threat model for attacks that may occur in public settings, such as filming the user’s interaction while drawing money from an ATM. Based on a 12-participant user study, we show that our approach is significantly more secure than a standard image-based authentication and gaze-based 4-digit PIN entry.
@InProceedings{bulling2012chi,
author = {Bulling, Andreas and Alt, Florian and Schmidt, Albrecht},
title = {{Increasing The Security Of Gaze-Based Cued-Recall Graphical Passwords Using Saliency Masks}},
booktitle = {Proceedings of the 2012 ACM Annual Conference on Human Factors in Computing Systems},
year = {2012},
series = {CHI'12},
pages = {3011--3020},
address = {New York, NY, USA},
month = apr,
publisher = {ACM},
note = {bulling2012chi},
abstract = {With computers being used ever more ubiquitously in situations where privacy is important, secure user authentication is a central requirement. Gaze-based graphical passwords are a particularly promising means for shoulder-surfing-resistant authentication, but selecting secure passwords remains challenging. In this paper, we present a novel gaze-based authentication scheme that makes use of cued-recall graphical passwords on a single image. In order to increase password security, our approach uses a computational model of visual attention to mask those areas of the image that are most likely to attract visual attention. We create a realistic threat model for attacks that may occur in public settings, such as filming the user's interaction while drawing money from an ATM. Based on a 12-participant user study, we show that our approach is significantly more secure than a standard image-based authentication and gaze-based 4-digit PIN entry.},
acmid = {2208712},
doi = {10.1145/2207676.2208712},
isbn = {978-1-4503-1015-4},
keywords = {cued-recall graphical passwords, eye tracking, gaze-based, saliency masks, User authentication},
location = {Austin, Texas, USA},
numpages = {10},
timestamp = {2012.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/bulling2012chi.pdf},
}
J. Müller, R. Walter, G. Bailly, M. Nischt, and F. Alt. Looking Glass: A Field Study on Noticing Interactivity of a Shop Window. In Proceedings of the 2012 ACM Conference on Human Factors in Computing Systems (CHI’12), ACM, New York, NY, USA, 2012, p. 297–306. doi:10.1145/2207676.2207718
[BibTeX] [Abstract] [PDF]
In this paper we present our findings from a lab and a field study investigating how passers-by notice the interactivity of public displays. We designed an interactive installation that uses visual feedback to the incidental movements of passers-by to communicate its interactivity. The lab study reveals: (1) Mirrored user silhouettes and images are more effective than avatar-like representations. (2) It takes time to notice the interactivity (approx. 1.2s). In the field study, three displays were installed during three weeks in shop windows, and data about 502 interaction sessions were collected. Our observations show: (1) Significantly more passers-by interact when immediately showing the mirrored user image (+90%) or silhouette (+47%) compared to a traditional attract sequence with call-to-action. (2) Passers-by often notice interactivity late and have to walk back to interact (the landing effect). (3) If somebody is already interacting, others begin interaction behind the ones already interacting, forming multiple rows (the honeypot effect). Our findings can be used to design public display applications and shop windows that more effectively communicate interactivity to passers-by.
@InProceedings{mueller2012chi,
author = {M\"{u}ller, J\"{o}rg and Walter, Robert and Bailly, Gilles and Nischt, Michael and Alt, Florian},
title = {{Looking Glass: A Field Study on Noticing Interactivity of a Shop Window}},
booktitle = {{Proceedings of the 2012 ACM Conference on Human Factors in Computing Systems}},
year = {2012},
series = {CHI'12},
pages = {297--306},
address = {New York, NY, USA},
month = apr,
publisher = {ACM},
note = {mueller2012chi},
abstract = {In this paper we present our findings from a lab and a field study investigating how passers-by notice the interactivity of public displays. We designed an interactive installation that uses visual feedback to the incidental movements of passers-by to communicate its interactivity. The lab study reveals: (1) Mirrored user silhouettes and images are more effective than avatar-like representations. (2) It takes time to notice the interactivity (approx. 1.2s). In the field study, three displays were installed during three weeks in shop windows, and data about 502 interaction sessions were collected. Our observations show: (1) Significantly more passers-by interact when immediately showing the mirrored user image (+90%) or silhouette (+47%) compared to a traditional attract sequence with call-to-action. (2) Passers-by often notice interactivity late and have to walk back to interact (the landing effect). (3) If somebody is already interacting, others begin interaction behind the ones already interacting, forming multiple rows (the honeypot effect). Our findings can be used to design public display applications and shop windows that more effectively communicate interactivity to passers-by.},
acmid = {2207718},
doi = {10.1145/2207676.2207718},
isbn = {978-1-4503-1015-4},
keywords = {interactivity, noticing interactivity, public displays, User representation},
location = {Austin, Texas, USA},
numpages = {10},
owner = {flo},
timestamp = {2012.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mueller2012chi.pdf},
}
N. Memarovic, M. Langheinrich, and F. Alt. Interacting places — a framework for promoting community interaction and place awareness through public displays. In 2012 IEEE international conference on pervasive computing and communications workshops, 2012, pp. 327-430. doi:10.1109/PerComW.2012.6197526
[BibTeX] [Abstract] [PDF]
The proliferation of public displays, along with ubiquitous wireless communication and sensing technology, has made it possible to create a novel public communication medium: open networked pervasive displays would allow citizens to provide their own content, appropriate close-by displays, and increase their own awareness of a display’s surroundings and its local communities. We envision that such displays ultimately can create interacting places, i.e., public spaces that promote community interaction and place awareness. In this paper we describe our Interacting Places Framework (IPF), which helps to identify challenges and opportunities in this novel research space. Our IPF has 4 elements: 1) content providers, i.e., entities that supply content; 2) content viewers, i.e., people who consume the content; 3) a number of interacting places communication channels that support inclusive, i.e., open-for-everyone, and exclusive, i.e., closed-group communication; and 4) an awareness diffusion layer that promotes community interaction either explicitly, i.e., through content tailored towards a specific audience, or implicitly, by observing output for other people. We have begun initial deployments examining this space and will use the framework presented here to analyze future results.
@InProceedings{memarovic2012percomadj,
author = {Memarovic, Nemanja and Langheinrich, Marc and Alt, Florian},
title = {Interacting Places -- A Framework for Promoting Community Interaction and Place Awareness through Public Displays},
booktitle = {2012 IEEE International Conference on Pervasive Computing and Communications Workshops},
year = {2012},
pages = {327--430},
month = mar,
note = {memarovic2012percomadj},
abstract = {The proliferation of public displays, along with ubiquitous wireless communication and sensing technology, has made it possible to create a novel public communication medium: open networked pervasive displays would allow citizens to provide their own content, appropriate close-by displays, and increase their own awareness of a display's surroundings and its local communities. We envision that such displays ultimately can create interacting places, i.e., public spaces that promote community interaction and place awareness. In this paper we describe our Interacting Places Framework (IPF), which helps to identify challenges and opportunities in this novel research space. Our IPF has 4 elements: 1) content providers, i.e., entities that supply content; 2) content viewers, i.e., people who consume the content; 3) a number of interacting places communication channels that support inclusive, i.e., open-for-everyone, and exclusive, i.e., closed-group communication; and 4) an awareness diffusion layer that promotes community interaction either explicitly, i.e., through content tailored towards a specific audience, or implicitly, by observing output for other people. We have begun initial deployments examining this space and will use the framework presented here to analyze future results.},
doi = {10.1109/PerComW.2012.6197526},
internal-note = {NOTE(review): page range 327--430 copied from source metadata looks implausibly wide for a workshop paper -- verify against IEEE Xplore},
keywords = {liquid crystal displays;mobile computing;public utilities;social sciences;wireless sensor networks;public displays;ubiquitous wireless communication;public communication medium;open networked pervasive displays;close-by displays;local communities;public spaces;community interaction;place awareness;content providers;content viewers;interacting place communication channel;open-for-everyone communication channel;exclusive communication channel;inclusive communication channel;closed-group communication channel;awareness diffusion layer;wireless sensing technology;Communities;Communication channels;Mobile handsets;Presses;Instruments;Educational institutions;Cities and towns;community interaction;interacting places;public displays;urban computing;urban informatics},
timestamp = {2012.04.17},
}
A. Schmidt, B. Pfleging, F. Alt, A. Sahami, and G. Fitzpatrick. Interacting with 21st-century computers. IEEE Pervasive Computing, vol. 11, iss. 1, pp. 22-31, 2012. doi:10.1109/MPRV.2011.81
[BibTeX] [Abstract] [PDF]
This paper reflects on four themes from Weiser’s original vision from a human-computer interaction perspective: computing everywhere, personal computing, the social dimension of computing, and . The authors review developments both in accordance with and contrasting this vision.
@Article{schmidt2012pervasivecomputing,
author = {A. Schmidt and B. Pfleging and F. Alt and A. Sahami and G. Fitzpatrick},
journal = {IEEE Pervasive Computing},
title = {Interacting with 21st-Century Computers},
year = {2012},
issn = {1536-1268},
month = jan,
note = {schmidt2012pervasivecomputing},
number = {1},
pages = {22--31},
volume = {11},
abstract = {This paper reflects on four themes from Weiser's original vision from a human-computer interaction perspective: computing everywhere, personal computing, the social dimension of computing, and . The authors review developments both in accordance with and contrasting this vision.},
doi = {10.1109/MPRV.2011.81},
keywords = {data privacy;human computer interaction;social aspects of automation;human-computer interaction perspective;Weiser perspective;computing everywhere perspective;personal computing perspective;social dimension perspective;privacy implication;Pervasive computing;User/Machine Systems;User Interfaces;Multimedia Information Systems;Evolutionary prototyping;Human Factors in Software Design;User interfaces.},
timestamp = {2012.04.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schmidt2012pervasivecomputing.pdf},
}

### 2011

T. Ning, J. Müller, R. Walter, G. Bailly, C. Wacharamanotham, J. Borchers, and F. Alt. No Need To Stop: Menu Techniques for Passing by Public Displays. In Proceedings of the CHI workshop on large displays in urban life, Vancouver, BC, Canada, 2011.
[BibTeX] [Abstract] [PDF]
Although public displays are increasingly prevalent in public spaces, they are generally not interactive. Menu techniques can enable users to select what is interesting to them. Current touch screen techniques are unsuitable, because for many public displays, users merely pass by and rarely stop. We investigate command selection in this new context of passing-by interaction, in which users only have a few seconds to interact. We present six hands-free gestural techniques and evaluate them in a Wizard-of-Oz experiment. Based on the results of this study, we provide design recommendations for menu selection in passing-by situations.
@InProceedings{ning2011ldul,
author = {Tongyan Ning and J\"{o}rg M\"{u}ller and Robert Walter and Gilles Bailly and Chachatvan Wacharamanotham and Jan Borchers and Florian Alt},
title = {{No Need To Stop: Menu Techniques for Passing by Public Displays}},
booktitle = {Proceedings of the CHI Workshop on Large Displays in Urban Life},
year = {2011},
month = apr,
note = {ning2011ldul},
abstract = {Although public displays are increasingly prevalent in public spaces, they are generally not interactive. Menu techniques can enable users to select what is interesting to them. Current touch screen techniques are unsuitable, because for many public displays, users merely pass by and rarely stop. We investigate command selection in this new context of passing-by interaction, in which users only have a few seconds to interact. We present six hands-free gestural techniques and evaluate them in a Wizard-of-Oz experiment. Based on the results of this study, we provide design recommendations for menu selection in passing-by situations.},
owner = {flo},
timestamp = {2012.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/ning2011ldul.pdf},
}
F. Alt, T. Kubitza, D. Bial, F. Zaidan, M. Ortel, B. Zurmaar, T. Lewen, A. S. Shirazi, and A. Schmidt. Digifieds: insights into deploying digital public notice areas in the wild. In Proceedings of the 10th international conference on mobile and ubiquitous multimedia (MUM ’11), ACM, New York, NY, USA, 2011, p. 165–174. doi:10.1145/2107596.2107618
[BibTeX] [Abstract] [PDF]
Traditional public notice areas (PNAs) are nowadays a popular means to publicly exchange information and reach people of a local community. The high usability led to a wide-spread use in stores, cafes, supermarkets, and public institutions. With public displays permeating public spaces and with display providers and owners being willing to share parts of their display space we envision traditional PNAs to be complemented or even replaced by their digital counterparts in the future, hence contributing to making public displays a novel communication medium. In this paper we report on the design and development of Digifieds (derived from digital classified), a digital public notice area. We deployed and evaluated Digifieds in an urban environment in the context of the UbiChallenge 2011 in Oulu, Finland over the course of 6 months. The deployment allowed the users’ view to be studied with regard to the envisioned content, preferred interaction techniques, as well as privacy concerns, and to compare them against traditional PNAs.
@InProceedings{alt2011mum1,
author = {Alt, Florian and Kubitza, Thomas and Bial, Dominik and Zaidan, Firas and Ortel, Markus and Zurmaar, Bj\"{o}rn and Lewen, Tim and Shirazi, Alireza Sahami and Schmidt, Albrecht},
title = {Digifieds: Insights into Deploying Digital Public Notice Areas in the Wild},
booktitle = {Proceedings of the 10th International Conference on Mobile and Ubiquitous Multimedia},
year = {2011},
series = {MUM '11},
pages = {165--174},
address = {New York, NY, USA},
month = dec,
publisher = {ACM},
note = {alt2011mum1},
abstract = {Traditional public notice areas (PNAs) are nowadays a popular means to publicly exchange information and reach people of a local community. The high usability led to a wide-spread use in stores, cafes, supermarkets, and public institutions. With public displays permeating public spaces and with display providers and owners being willing to share parts of their display space we envision traditional PNAs to be complemented or even replaced by their digital counterparts in the future, hence contributing to making public displays a novel communication medium. In this paper we report on the design and development of Digifieds (derived from digital classified), a digital public notice area. We deployed and evaluated Digifieds in an urban environment in the context of the UbiChallenge 2011 in Oulu, Finland over the course of 6 months. The deployment allowed the users' view to be studied with regard to the envisioned content, preferred interaction techniques, as well as privacy concerns, and to compare them against traditional PNAs.},
acmid = {2107618},
doi = {10.1145/2107596.2107618},
isbn = {978-1-4503-1096-3},
keywords = {classifieds, digifieds, interaction, public displays, urban computing},
location = {Beijing, China},
numpages = {10},
timestamp = {2011.12.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2011mum1.pdf},
}
F. Alt, A. Bungert, B. Pfleging, A. Schmidt, and M. Havemann. Supporting Children With Special Needs Through Multi-Perspective Behavior Analysis. In Proceedings of the tenth international conference on mobile and ubiquitous multimedia (MUM’11), ACM, New York, NY, USA, 2011, p. 81–84. doi:10.1145/2107596.2107605
[BibTeX] [Abstract] [PDF]
In past years, ubiquitous computing technologies have been successfully deployed for supporting children with special needs. One focus of current research has been on post-hoc behavior analysis based on video footage where one or multiple cameras were used to review situations in which children behaved in a certain way. As miniaturized cameras as well as portable devices are becoming available at low costs, we envision a new quality in supporting the diagnosis, observation, and education of children with special needs. In contrast to existing approaches that use cameras in fix locations, we suggest to use multiple mobile camera perspectives. In this way observation data from fellow classmates, teachers, and caregivers can be considered, even in highly dynamic outdoor situations. In this paper we present MuPerBeAn, a platform that allows multi-perspective video footage from mobile cameras to be collected, synchronously reviewed, and annotated. We report on interviews with caregivers and parents and present a qualitative study based on two scenarios involving a total of seven children with autism (CWA). Our findings show that observing multiple mobile perspectives can help children as well as teachers to better reflect on situations, particularly during education.
@InProceedings{alt2011mum2,
author = {Alt, Florian and Bungert, Andreas and Pfleging, Bastian and Schmidt, Albrecht and Havemann, Meindert},
title = {{Supporting Children With Special Needs Through Multi-Perspective Behavior Analysis}},
booktitle = {Proceedings of the Tenth International Conference on Mobile and Ubiquitous Multimedia},
year = {2011},
series = {MUM'11},
pages = {81--84},
address = {New York, NY, USA},
month = dec,
publisher = {ACM},
note = {alt2011mum2},
abstract = {In past years, ubiquitous computing technologies have been successfully deployed for supporting children with special needs. One focus of current research has been on post-hoc behavior analysis based on video footage where one or multiple cameras were used to review situations in which children behaved in a certain way. As miniaturized cameras as well as portable devices are becoming available at low costs, we envision a new quality in supporting the diagnosis, observation, and education of children with special needs. In contrast to existing approaches that use cameras in fix locations, we suggest to use multiple mobile camera perspectives. In this way observation data from fellow classmates, teachers, and caregivers can be considered, even in highly dynamic outdoor situations. In this paper we present MuPerBeAn, a platform that allows multi-perspective video footage from mobile cameras to be collected, synchronously reviewed, and annotated. We report on interviews with caregivers and parents and present a qualitative study based on two scenarios involving a total of seven children with autism (CWA). Our findings show that observing multiple mobile perspectives can help children as well as teachers to better reflect on situations, particularly during education.},
acmid = {2107605},
doi = {10.1145/2107596.2107605},
isbn = {978-1-4503-1096-3},
keywords = {autism, cameras, mobile devices, ubiquitous computing},
location = {Beijing, China},
numpages = {4},
timestamp = {2011.12.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2011mum2.pdf},
}
J. Müller, F. Alt, and D. Michelis. Introduction to Pervasive Advertising. In Pervasive advertising, Springer London Limited, 2011.
[BibTeX] [Abstract] [PDF]
As pervasive computing technologies leave the labs, they are starting tobe used for the purpose of advertising. Pervasive Advertising has the potential toaffect everyone’s life, but it seems that a knowledge gap is preventing us fromshaping this development in a meaningful way. In particular, many marketing andadvertising professionals have an expert understanding of their trade, but are unawareof recent advances in pervasive computing technologies, the opportunitiesthey offer, and the challenges they pose. Similarly, many pervasive computing researchersand professionals are on top of the recent technological advances, butlack basic marketing and advertising expertise and therefore an understanding ofhow their technology can influence these fields. This book is intended to close thisgap and provide the means to meaningfully shape the future of pervasive advertising.
@InProceedings{mueller2011perad-intro,
author = {J\"{o}rg M\"{u}ller and Florian Alt and Daniel Michelis},
title = {{Introduction to Pervasive Advertising}},
booktitle = {Pervasive Advertising},
year = {2011},
editor = {J\"{o}rg M\"{u}ller and Florian Alt and Daniel Michelis},
month = sep,
publisher = {Springer London Limited},
abstract = {As pervasive computing technologies leave the labs, they are starting tobe used for the purpose of advertising. Pervasive Advertising has the potential toaffect everyone’s life, but it seems that a knowledge gap is preventing us fromshaping this development in a meaningful way. In particular, many marketing andadvertising professionals have an expert understanding of their trade, but are unawareof recent advances in pervasive computing technologies, the opportunitiesthey offer, and the challenges they pose. Similarly, many pervasive computing researchersand professionals are on top of the recent technological advances, butlack basic marketing and advertising expertise and therefore an understanding ofhow their technology can influence these fields. This book is intended to close thisgap and provide the means to meaningfully shape the future of pervasive advertising.},
comment = {978-0-85729-351-0},
owner = {flo},
timestamp = {2011.09.01},
}
J. Müller, F. Alt, and D. Michelis, Pervasive Advertising, Springer London Limited, 2011.
[BibTeX] [Abstract] [PDF]
As pervasive computing technologies leave the labs, they are starting tobe used for the purpose of advertising. Pervasive Advertising has the potential toaffect everyone’s life, but it seems that a knowledge gap is preventing us fromshaping this development in a meaningful way. In particular, many marketing andadvertising professionals have an expert understanding of their trade, but are unawareof recent advances in pervasive computing technologies, the opportunitiesthey offer, and the challenges they pose. Similarly, many pervasive computing researchersand professionals are on top of the recent technological advances, butlack basic marketing and advertising expertise and therefore an understanding ofhow their technology can influence these fields. This book is intended to close thisgap and provide the means to meaningfully shape the future of pervasive advertising.
@Book{mueller2011perad,
title = {{Pervasive Advertising}},
publisher = {{Springer London Limited}},
year = {2011},
author = {J{\"o}rg M{\"u}ller and Florian Alt and Daniel Michelis},
isbn = {978-0-85729-351-0},
abstract = {As pervasive computing technologies leave the labs, they are starting tobe used for the purpose of advertising. Pervasive Advertising has the potential toaffect everyone’s life, but it seems that a knowledge gap is preventing us fromshaping this development in a meaningful way. In particular, many marketing andadvertising professionals have an expert understanding of their trade, but are unawareof recent advances in pervasive computing technologies, the opportunitiesthey offer, and the challenges they pose. Similarly, many pervasive computing researchersand professionals are on top of the recent technological advances, butlack basic marketing and advertising expertise and therefore an understanding ofhow their technology can influence these fields. This book is intended to close thisgap and provide the means to meaningfully shape the future of pervasive advertising.},
bibsource = {DBLP, http://dblp.uni-trier.de},
comment = {978-0-85729-351-0},
doi = {10.1007/978-0-85729-352-7},
ee = {http://dx.doi.org/10.1007/978-0-85729-352-7},
timestamp = {2011.09.01},
}
N. Memarovic, M. Langheinrich, and F. Alt. Connecting People through Content – Promoting Community Identity Cognition through People and Places. In Proceedings of community informatics, Prato, Italy, 2011.
[BibTeX] [Abstract] [PDF]
Large public screens are proliferating in public spaces. Today, most of them arestandalone installations that display advertisements in the form of slides, short movies, or stillimages. However, it is not hard to imagine that these displays will soon be connected throughthe Internet, thus creating a global and powerful communication medium capable of providingrich, interactive applications. We believe that such a medium has the potential to fosterconnections within and between communities in public spaces. In this paper we present aresearch agenda for interacting places, i.e., public spaces that connect communities throughpublic displays. We then report on our initial work in this space, in particular on using publicdisplays for creating what we call identity cognition – increasing the sense of being connectedbetween community members occupying the same space. We have investigated two options forachieving identity cognition: (a) through content that originates from the environment, and (b)through content that originates from people. Content originating from the environment portraysinformation about a display’s surrounding. For this type of content, identity cognition is usuallybeing achieved implicitly by stimulating the effect of ‘triangulation’, an effect whereparticularities of the physical space act as links between people. Content originating frompeople, on the other hand, explicitly achieves identity cognition by promoting community valuesthrough content that expresses the attitudes, beliefs, and ideas of individual communitymembers. We have built and deployed two public display applications that support identitycognition using environmentally-sourced content and people-sourced content, respectively.
@InProceedings{memarovic2011cirn,
author = {Nemanja Memarovic and Marc Langheinrich and Florian Alt},
title = {{Connecting People through Content -- Promoting Community Identity Cognition through People and Places}},
booktitle = {Proceedings of Community Informatics},
year = {2011},
note = {memarovic2011cirn},
abstract = {Large public screens are proliferating in public spaces. Today, most of them arestandalone installations that display advertisements in the form of slides, short movies, or stillimages. However, it is not hard to imagine that these displays will soon be connected throughthe Internet, thus creating a global and powerful communication medium capable of providingrich, interactive applications. We believe that such a medium has the potential to fosterconnections within and between communities in public spaces. In this paper we present aresearch agenda for interacting places, i.e., public spaces that connect communities throughpublic displays. We then report on our initial work in this space, in particular on using publicdisplays for creating what we call identity cognition – increasing the sense of being connectedbetween community members occupying the same space. We have investigated two options forachieving identity cognition: (a) through content that originates from the environment, and (b)through content that originates from people. Content originating from the environment portraysinformation about a display’s surrounding. For this type of content, identity cognition is usuallybeing achieved implicitly by stimulating the effect of ‘triangulation’, an effect whereparticularities of the physical space act as links between people. Content originating frompeople, on the other hand, explicitly achieves identity cognition by promoting community valuesthrough content that expresses the attitudes, beliefs, and ideas of individual communitymembers. We have built and deployed two public display applications that support identitycognition using environmentally-sourced content and people-sourced content, respectively.},
location = {Prato, Italy},
timestamp = {2011.08.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/memarovic2011cirn.pdf},
}
A. S. Shirazi, T. Kubitza, F. Alt, P. Tarasiewicz, A. Bungert, V. Minakov, and A. Schmidt. Mobile Context-based Ride Sharing. In Adjunct proceedings of the ninth international conference on pervasive computing (Pervasive’11), San Francisco, CA, US, 2011.
[BibTeX] [Abstract] [PDF]
When it comes to transportation, especially in densely populated areas,people usually face a trade-off between convenience and costs. Whereas onone hand convenience as a driving factor leads to that people are preferring touse cars, air pollution, traffic jams, and high cost due to fuel price on the otherhand encourage many people (e.g., commuters) to use collective transportation(CT), such as public transport systems. However it does not support door-todoortransportation and might be inconvenient due to limited services in offpeakhours or high costs when travelling long distances. A solution growing inpopularity is ride sharing, a form of CT making alternative transportation moreaffordable. In this paper we present a modular platform supporting differentforms of ride sharing based on context information. WEtaxi is a system, whichallows sharing taxis among multiple persons. WEticket supports sharing traintickets through finding additional people going onto the same journey.
@InProceedings{sahami2011pervasiveadj,
author = {Alireza Sahami Shirazi AND Thomas Kubitza AND Florian Alt AND Philipp Tarasiewicz AND Andreas Bungert AND Vladimir Minakov AND Albrecht Schmidt},
title = {{Mobile Context-based Ride Sharing}},
booktitle = {Adjunct Proceedings of the Ninth International Conference on Pervasive Computing},
year = {2011},
series = {Pervasive'11},
address = {San Francisco, CA, US},
month = jun,
note = {sahami2011pervasiveadj},
abstract = {When it comes to transportation, especially in densely populated areas,people usually face a trade-off between convenience and costs. Whereas onone hand convenience as a driving factor leads to that people are preferring touse cars, air pollution, traffic jams, and high cost due to fuel price on the otherhand encourage many people (e.g., commuters) to use collective transportation(CT), such as public transport systems. However it does not support door-todoortransportation and might be inconvenient due to limited services in offpeakhours or high costs when travelling long distances. A solution growing inpopularity is ride sharing, a form of CT making alternative transportation moreaffordable. In this paper we present a modular platform supporting differentforms of ride sharing based on context information. WEtaxi is a system, whichallows sharing taxis among multiple persons. WEticket supports sharing traintickets through finding additional people going onto the same journey.},
owner = {flo},
timestamp = {2011.06.01},
}
F. Alt, N. Memarovic, I. Elhart, D. Bial, A. Schmidt, M. Langheinrich, G. Harboe, E. Huang, and M. P. Scipioni. Designing Shared Public Display Networks: Implications from Today’s Paper-based Notice Areas. In Proceedings of the ninth international conference on pervasive computing (Pervasive’11), Springer-Verlag, Berlin, Heidelberg, 2011, p. 258–275.
[BibTeX] [Abstract] [PDF]
Abstract. Large public displays have become a regular conceptual element in many shops and businesses, where they advertise products or highlight upcoming events. In our work, we are interested in exploring how these isolated display solutions can be interconnected to form a single large network of public displays, thus supporting novel forms of sharing access to display real estate. In order to explore the feasibility of this vision, we investigated today’s practices surrounding shared notice areas, i.e. places where customers and visitors can put up event posters and classifieds, such as shop windows or notice boards. In particular, we looked at the content posted to such areas, the means for sharing it (i.e., forms of content control), and the reason for providing the shared notice area. Based on two-week long photo logs and a number of in-depth interviews with providers of such notice areas, we provide a systematic assessment of factors that inhibit or promote the shared use of public display space, ultimately leading to a set of concrete design implication for providing future digital versions of such public notice areas in the form of networked public displays.
@InProceedings{alt2011pervasive,
author = {Alt, Florian and Memarovic, Nemanja and Elhart, Ivan and Bial, Dominik and Schmidt, Albrecht and Langheinrich, Marc and Harboe, Gunnar and Huang, Elaine and Scipioni, Marcello P.},
title = {{Designing Shared Public Display Networks: Implications from Today's Paper-based Notice Areas}},
booktitle = {Proceedings of the Ninth International Conference on Pervasive Computing},
year = {2011},
series = {Pervasive'11},
pages = {258--275},
month = jun,
publisher = {Springer-Verlag},
address = {Berlin, Heidelberg},
note = {alt2011pervasive},
abstract = {Abstract. Large public displays have become a regular conceptual element in many shops and businesses, where they advertise products or highlight upcoming events. In our work, we are interested in exploring how these isolated display solutions can be interconnected to form a single large network of public displays, thus supporting novel forms of sharing access to display real estate. In order to explore the feasibility of this vision, we investigated today’s practices surrounding shared notice areas, i.e. places where customers and visitors can put up event posters and classifieds, such as shop windows or notice boards. In particular, we looked at the content posted to such areas, the means for sharing it (i.e., forms of content control), and the reason for providing the shared notice area. Based on two-week long photo logs and a number of in-depth interviews with providers of such notice areas, we provide a systematic assessment of factors that inhibit or promote the shared use of public display space, ultimately leading to a set of concrete design implication for providing future digital versions of such public notice areas in the form of networked public displays.},
acmid = {2021999},
isbn = {978-3-642-21725-8},
keywords = {advertising, observation, public display},
location = {San Francisco, USA},
numpages = {18},
timestamp = {2011.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2011pervasive.pdf},
}
F. Alt, D. Bial, T. Kubitza, A. S. Shirazi, M. Ortel, B. Zurmaar, F. Zaidan, T. Lewen, and A. Schmidt. Digifieds: Evaluating Suitable Interaction Techniques for Shared Public Notice Areas. In Adjunct proceedings of the ninth international conference on pervasive computing (Pervasive’11), San Francisco, CA, USA, 2011.
[BibTeX] [Abstract] [PDF]
Public notice areas are nowadays being widely used in stores, restaurants,cafes and public institutions by customers and visitors to sell or advertiseproducts and upcoming events. Although web platforms such as Craigslist oreBay offer similar services, traditional notice areas are highly popular as usingpen and paper poses only a minimal barrier to share content. With public displaysproliferating the public space and with means to network these displays,novel opportunities arise as to how information can be managed and shared. Inan initial step we systematically assessed factors inhibiting or promoting theshared use of public display space and derived design implications for providinga digital version of such public notice areas [2]. In this poster we report onthe implementation of such a digital shared notice area, called Digifieds. Withan initial lab study we aimed at understanding suitable means of interactionwhen it comes to creating, posting, and taking away content.
@InProceedings{alt2011pervasiveadj,
author = {Florian Alt AND Dominik Bial AND Thomas Kubitza AND Alireza Sahami Shirazi AND Markus Ortel AND Bjoern Zurmaar AND Firas Zaidan AND Tim Lewen AND Albrecht Schmidt},
title = {{Digifieds: Evaluating Suitable Interaction Techniques for Shared Public Notice Areas}},
booktitle = {Adjunct Proceedings of the Ninth International Conference on Pervasive Computing},
year = {2011},
series = {Pervasive'11},
address = {San Francisco, CA, USA},
month = jun,
abstract = {Public notice areas are nowadays being widely used in stores, restaurants,cafes and public institutions by customers and visitors to sell or advertiseproducts and upcoming events. Although web platforms such as Craigslist oreBay offer similar services, traditional notice areas are highly popular as usingpen and paper poses only a minimal barrier to share content. With public displaysproliferating the public space and with means to network these displays,novel opportunities arise as to how information can be managed and shared. Inan initial step we systematically assessed factors inhibiting or promoting theshared use of public display space and derived design implications for providinga digital version of such public notice areas [2]. In this poster we report onthe implementation of such a digital shared notice area, called Digifieds. Withan initial lab study we aimed at understanding suitable means of interactionwhen it comes to creating, posting, and taking away content.},
owner = {flo},
timestamp = {2011.06.01},
}
M. Langheinrich, N. Memarovic, I. Elhart, and F. Alt. Autopoiesic Content: A Conceptual Model for Enabling Situated Self-generative Content for Public Displays. In Proceedings of the first workshop on pervasive urban applications (PURBA’11), 2011.
[BibTeX] [Abstract] [PDF]
Abstract. The significant price drops in large LCD panels have led to a massive proliferation of digital public displays in public spaces. Most of these displays, however, simply show some form of traditional advertising, such as short commercials, animated presentations, or still images. Creating content that ex-plicitly takes the particular location and surroundings of a space into account, in order to increase its relevance for passers-by, is typically infeasible due to the high costs associated with customized content. We argue that the concept of au-topoiesic content (i.e., self-generative content) could significantly increase the local relevance of such situated public displays without requiring much custom-ization efforts. As a sample application, this position paper outlines the concept and architecture of Funsquare, a large public display system that uses autopoi-esic content to facilitate social interaction.
@InProceedings{langheinrich2011purba,
author = {Marc Langheinrich AND Nemanja Memarovic AND Ivan Elhart AND Florian Alt},
title = {{Autopoiesic Content: A Conceptual Model for Enabling Situated Self-generative Content for Public Displays}},
booktitle = {Proceedings of the First Workshop on Pervasive Urban Applications},
year = {2011},
series = {PURBA'11},
month = jun,
note = {langheinrich2011purba},
abstract = {Abstract. The significant price drops in large LCD panels have led to a massive proliferation of digital public displays in public spaces. Most of these displays, however, simply show some form of traditional advertising, such as short commercials, animated presentations, or still images. Creating content that ex-plicitly takes the particular location and surroundings of a space into account, in order to increase its relevance for passers-by, is typically infeasible due to the high costs associated with customized content. We argue that the concept of au-topoiesic content (i.e., self-generative content) could significantly increase the local relevance of such situated public displays without requiring much custom-ization efforts. As a sample application, this position paper outlines the concept and architecture of Funsquare, a large public display system that uses autopoi-esic content to facilitate social interaction.},
location = {San Francisco, US},
timestamp = {2011.05.30},
url = {http://www.florian-alt.org/unibw/wp-content/publications/langheinrich2011purba.pdf},
}
D. Bial, D. Kern, F. Alt, and A. Schmidt. Enhancing Outdoor Navigation Systems Through Vibrotactile Feedback. In CHI ’11 extended abstracts on human factors in computing systems (CHI EA’11), ACM, New York, NY, USA, 2011, p. 1273–1278. doi:10.1145/1979742.1979760
[BibTeX] [Abstract] [PDF]
While driving many tasks compete for the attention of the user, mainly via the audio and visual channel. When designing systems depending upon providing feedback to users (e.g., navigation systems), it is a crucial prerequisite to minimize influence on and distraction from the driving task. This becomes even more important when designing systems for the use on motorbikes; space for output devices is scarce, as people are wearing helmets visual feedback is often difficult due to lighting conditions, and audio feedback is limited. In a first step we aimed at creating an understanding as to how information could be communicated in a meaningful way using vibrotactile signals. Therefore, we investigated suitable positions of actuators on the hand, appropriate length of the vibration stimulus, and different vibration patterns. We built a first prototype with 4 vibration actuators attached to the fingertips and asked 4 participants to test our prototype while driving. With this work we envision to lay the foundations for vibrotactile support in navigation systems.
@InProceedings{bial2011chiea,
author = {Bial, Dominik and Kern, Dagmar and Alt, Florian and Schmidt, Albrecht},
title = {{Enhancing Outdoor Navigation Systems Through Vibrotactile Feedback}},
booktitle = {CHI '11 Extended Abstracts on Human Factors in Computing Systems},
year = {2011},
series = {CHI EA'11},
pages = {1273--1278},
address = {New York, NY, USA},
month = apr,
publisher = {ACM},
note = {bial2011chiea},
abstract = {While driving many tasks compete for the attention of the user, mainly via the audio and visual channel. When designing systems depending upon providing feedback to users (e.g., navigation systems), it is a crucial prerequisite to minimize influence on and distraction from the driving task. This becomes even more important when designing systems for the use on motorbikes; space for output devices is scarce, as people are wearing helmets visual feedback is often difficult due to lighting conditions, and audio feedback is limited. In a first step we aimed at creating an understanding as to how information could be communicated in a meaningful way using vibrotactile signals. Therefore, we investigated suitable positions of actuators on the hand, appropriate length of the vibration stimulus, and different vibration patterns. We built a first prototype with 4 vibration actuators attached to the fingertips and asked 4 participants to test our prototype while driving. With this work we envision to lay the foundations for vibrotactile support in navigation systems.},
acmid = {1979760},
doi = {10.1145/1979742.1979760},
isbn = {978-1-4503-0268-5},
keywords = {field study, motorcycling, vibration patterns, vibro tactile navigation},
numpages = {6},
timestamp = {2011.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/bial2011chiea.pdf},
}
G. Beyer, F. Alt, J. Müller, A. Schmidt, K. Isakovic, S. Klose, M. Schiewe, and I. Haulsen. Audience Behavior Around Large Interactive Cylindrical Screens. In Proceedings of the 2011 Annual Conference on Human Factors in Computing Systems (CHI’11), ACM, New York, NY, USA, 2011, p. 1021–1030. doi:10.1145/1978942.1979095
[BibTeX] [Abstract] [PDF]
Non-planar screens, such as columns, have been a popular means for displaying information for a long time. In con-trast to traditional displays their digital counterparts are mainly flat and rectangular due to current technological constraints. However, we envision bendable displays to be available in the future, which will allow for creating new forms of displays with new properties. In this paper we ex-plore cylindrical displays as a possible form of such novel public displays. We present a prototype and report on a user study, comparing the influence of the display shape on user behavior and user experience between flat and cylindrical displays. The results indicate that people move more in the vicinity of cylindrical displays and that there is no longer a default position when it comes to interaction. As a result, such displays are especially suitable to keep people in motion and to support gesture-like interaction.
@InProceedings{beyer2011chi,
author = {Beyer, Gilbert and Alt, Florian and M\"{u}ller, J\"{o}rg and Schmidt, Albrecht and Isakovic, Karsten and Klose, Stefan and Schiewe, Manuel and Haulsen, Ivo},
title = {{Audience Behavior Around Large Interactive Cylindrical Screens}},
booktitle = {{Proceedings of the 2011 Annual Conference on Human Factors in Computing Systems}},
year = {2011},
series = {CHI'11},
pages = {1021--1030},
address = {New York, NY, USA},
month = apr,
publisher = {ACM},
note = {beyer2011chi},
abstract = {Non-planar screens, such as columns, have been a popular means for displaying information for a long time. In con-trast to traditional displays their digital counterparts are mainly flat and rectangular due to current technological constraints. However, we envision bendable displays to be available in the future, which will allow for creating new forms of displays with new properties. In this paper we ex-plore cylindrical displays as a possible form of such novel public displays. We present a prototype and report on a user study, comparing the influence of the display shape on user behavior and user experience between flat and cylindrical displays. The results indicate that people move more in the vicinity of cylindrical displays and that there is no longer a default position when it comes to interaction. As a result, such displays are especially suitable to keep people in motion and to support gesture-like interaction.},
acmid = {1979095},
doi = {10.1145/1978942.1979095},
isbn = {978-1-4503-0228-9},
keywords = {cylindrical screens, digital columns, display formats, interactive surfaces, non-planar screens, public displays},
numpages = {10},
timestamp = {2011.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/beyer2011chi.pdf},
}
G. Beyer, F. Alt, and J. Müller. On the Impact of Non-flat Screens on the Interaction with Public Displays. In Proceedings of the CHI workshop on large displays in urban life, Vancouver, BC, Canada, 2011.
[BibTeX] [Abstract] [PDF]
With decreasing prices for display technologies andbendable displays becoming commercially available,novel forms of public displays in arbitrary shapesemerge. However, different shapes impact on how usersbehave in the vicinity of such displays and how theyinteract with them. With our research we take a firststep towards exploring these novel displays. Wepresent findings from an initial study with cylindricaldisplays and discuss to what extent findings can be generalizedtowards other forms of public displays.
@InProceedings{beyer2011ldul,
author = {Gilbert Beyer AND Florian Alt AND J\"{o}rg M\"{u}ller},
title = {{On the Impact of Non-flat Screens on the Interaction with Public Displays}},
booktitle = {Proceedings of the CHI Workshop on Large Displays in Urban Life},
year = {2011},
month = apr,
note = {beyer2011ldul},
location = {Vancouver, BC, Canada},
abstract = {With decreasing prices for display technologies andbendable displays becoming commercially available,novel forms of public displays in arbitrary shapesemerge. However, different shapes impact on how usersbehave in the vicinity of such displays and how theyinteract with them. With our research we take a firststep towards exploring these novel displays. Wepresent findings from an initial study with cylindricaldisplays and discuss to what extent findings can be generalizedtowards other forms of public displays.},
owner = {flo},
timestamp = {2011.04.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/beyer2011ldul.pdf},
}

### 2010

F. Alt, D. Kern, F. Schulte, B. Pfleging, A. S. Shirazi, and A. Schmidt. Enabling micro-entertainment in vehicles based on context information. In Proceedings of the 2nd international conference on automotive user interfaces and interactive vehicular applications (AutomotiveUI ’10), ACM, New York, NY, USA, 2010, p. 117–124. doi:10.1145/1969773.1969794
[BibTeX] [Abstract] [PDF]
People spend a significant amount of time in their cars (US: 86 minutes/day, Europe: 43 minutes/day) while commuting, shopping, or traveling. Hence, the variety of entertainment in the car increases, and many vehicles are already equipped with displays, allowing for watching news, videos, accessing the Internet, or playing games. At the same time, the urbanization caused a massive increase of traffic volume, which led to people spending an ever-increasing amount of their time in front of red traffic lights. An observation of the prevailing forms of entertainment in the car reveals that content such as text, videos, or games are often a mere adaptation of content produced for television, public displays, PCs, or mobile phones and do not adapt to the situation in the car. In this paper we report on a web survey assessing which forms of entertainment and which types of content are considered to be useful for in-car entertainment by drivers. We then introduce an algorithm, which is capable of learning standing times in front of traffic lights based on GPS information only. This, on one hand, allows for providing content of appropriate length, on the other hand, for directing the attention of the driver back to-wards the street at the right time. Finally, we present a prototype implementation and a qualitative evaluation.
@InProceedings{alt2010autoui,
author = {Alt, Florian and Kern, Dagmar and Schulte, Fabian and Pfleging, Bastian and Shirazi, Alireza Sahami and Schmidt, Albrecht},
title = {Enabling Micro-entertainment in Vehicles Based on Context Information},
booktitle = {Proceedings of the 2nd International Conference on Automotive User Interfaces and Interactive Vehicular Applications},
year = {2010},
series = {AutomotiveUI '10},
pages = {117--124},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2010autoui},
abstract = {People spend a significant amount of time in their cars (US: 86 minutes/day, Europe: 43 minutes/day) while commuting, shopping, or traveling. Hence, the variety of entertainment in the car increases, and many vehicles are already equipped with displays, allowing for watching news, videos, accessing the Internet, or playing games. At the same time, the urbanization caused a massive increase of traffic volume, which led to people spending an ever-increasing amount of their time in front of red traffic lights. An observation of the prevailing forms of entertainment in the car reveals that content such as text, videos, or games are often a mere adaptation of content produced for television, public displays, PCs, or mobile phones and do not adapt to the situation in the car. In this paper we report on a web survey assessing which forms of entertainment and which types of content are considered to be useful for in-car entertainment by drivers. We then introduce an algorithm, which is capable of learning standing times in front of traffic lights based on GPS information only. This, on one hand, allows for providing content of appropriate length, on the other hand, for directing the attention of the driver back towards the street at the right time. Finally, we present a prototype implementation and a qualitative evaluation.},
acmid = {1969794},
doi = {10.1145/1969773.1969794},
isbn = {978-1-4503-0437-5},
keywords = {GPS, context, micro entertainment, vehicle},
location = {Pittsburgh, Pennsylvania},
numpages = {8},
timestamp = {2010.11.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2010autoui.pdf},
}
F. Alt, A. S. Shirazi, A. Schmidt, U. Kramer, and Z. Nawaz. Location-based Crowdsourcing: Extending Crowdsourcing to the Real World. In Proceedings of the sixth nordic conference on human-computer interaction: extending boundaries (NordiCHI ’10), ACM, New York, NY, USA, 2010, p. 13–22. doi:10.1145/1868914.1868921
[BibTeX] [Abstract] [PDF]
The WWW and the mobile phone have become an essential means for sharing implicitly and explicitly generated information and a communication platform for many people. With the increasing ubiquity of location sensing included in mobile devices we investigate the arising opportunities for mobile crowdsourcing making use of the real world context. In this paper we assess how the idea of user-generated content, web-based crowdsourcing, and mobile electronic coordination can be combined to extend crowdsourcing beyond the digital domain and link it to tasks in the real world. To explore our concept we implemented a crowd-sourcing platform that integrates location as a parameter for distributing tasks to workers. In the paper we describe the concept and design of the platform and discuss the results of two user studies. Overall the findings show that integrating tasks in the physical world is useful and feasible. We observed that (1) mobile workers prefer to pull tasks rather than getting them pushed, (2) requests for pictures were the most favored tasks, and (3) users tended to solve tasks mainly in close proximity to their homes. Based on this, we discuss issues that should be considered during designing mobile crowdsourcing applications.
@InProceedings{alt2010nordichi,
author = {Alt, Florian and Shirazi, Alireza Sahami and Schmidt, Albrecht and Kramer, Urs and Nawaz, Zahid},
title = {{Location-based Crowdsourcing: Extending Crowdsourcing to the Real World}},
booktitle = {Proceedings of the Sixth Nordic Conference on Human-Computer Interaction: Extending Boundaries},
year = {2010},
series = {NordiCHI '10},
pages = {13--22},
address = {New York, NY, USA},
month = oct,
publisher = {ACM},
note = {alt2010nordichi},
abstract = {The WWW and the mobile phone have become an essential means for sharing implicitly and explicitly generated information and a communication platform for many people. With the increasing ubiquity of location sensing included in mobile devices we investigate the arising opportunities for mobile crowdsourcing making use of the real world context. In this paper we assess how the idea of user-generated content, web-based crowdsourcing, and mobile electronic coordination can be combined to extend crowdsourcing beyond the digital domain and link it to tasks in the real world. To explore our concept we implemented a crowd-sourcing platform that integrates location as a parameter for distributing tasks to workers. In the paper we describe the concept and design of the platform and discuss the results of two user studies. Overall the findings show that integrating tasks in the physical world is useful and feasible. We observed that (1) mobile workers prefer to pull tasks rather than getting them pushed, (2) requests for pictures were the most favored tasks, and (3) users tended to solve tasks mainly in close proximity to their homes. Based on this, we discuss issues that should be considered during designing mobile crowdsourcing applications.},
acmid = {1868921},
doi = {10.1145/1868914.1868921},
isbn = {978-1-60558-934-3},
keywords = {context, crowdsourcing, location, mobile phone},
location = {Reykjavik, Iceland},
numpages = {10},
timestamp = {2010.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2010nordichi.pdf},
}
J. Müller, F. Alt, D. Michelis, and A. Schmidt. Requirements and Design Space for Interactive Public Displays. In Proceedings of the International Conference on Multimedia (MM’10), ACM, New York, NY, USA, 2010, p. 1285–1294. doi:10.1145/1873951.1874203
[BibTeX] [Abstract] [PDF]
Digital immersion is moving into public space. Interactive screens and public displays are deployed in urban environments, malls, and shop windows. Inner city areas, airports, train stations and stadiums are experiencing a transformation from traditional to digital displays enabling new forms of multimedia presentation and new user experiences. Imagine a walkway with digital displays that allows a user to immerse herself in her favorite content while moving through public space. In this paper we discuss the fundamentals for creating exciting public displays and multimedia experiences enabling new forms of engagement with digital content. Interaction in public space and with public displays can be categorized in phases, each having specific requirements. Attracting, engaging and motivating the user are central design issues that are addressed in this paper. We provide a comprehensive analysis of the design space explaining mental models and interaction modalities and we conclude a taxonomy for interactive public display from this analysis. Our analysis and the taxonomy are grounded in a large number of research projects, art installations and experience. With our contribution we aim at providing a comprehensive guide for designers and developers of interactive multimedia on public displays.
@InProceedings{mueller2010mm,
  author    = {M\"{u}ller, J\"{o}rg and Alt, Florian and Michelis, Daniel and Schmidt, Albrecht},
  title     = {{Requirements and Design Space for Interactive Public Displays}},
  booktitle = {{Proceedings of the International Conference on Multimedia}},
  series    = {MM'10},
  year      = {2010},
  pages     = {1285--1294},
  publisher = {ACM},
  address   = {New York, NY, USA},
  location  = {Firenze, Italy},
  numpages  = {10},
  isbn      = {978-1-60558-933-6},
  doi       = {10.1145/1873951.1874203},
  acmid     = {1874203},
  keywords  = {design space, interaction, public displays, requirements},
  abstract  = {Digital immersion is moving into public space. Interactive screens and public displays are deployed in urban environments, malls, and shop windows. Inner city areas, airports, train stations and stadiums are experiencing a transformation from traditional to digital displays enabling new forms of multimedia presentation and new user experiences. Imagine a walkway with digital displays that allows a user to immerse herself in her favorite content while moving through public space. In this paper we discuss the fundamentals for creating exciting public displays and multimedia experiences enabling new forms of engagement with digital content. Interaction in public space and with public displays can be categorized in phases, each having specific requirements. Attracting, engaging and motivating the user are central design issues that are addressed in this paper. We provide a comprehensive analysis of the design space explaining mental models and interaction modalities and we conclude a taxonomy for interactive public display from this analysis. Our analysis and the taxonomy are grounded in a large number of research projects, art installations and experience. With our contribution we aim at providing a comprehensive guide for designers and developers of interactive multimedia on public displays.},
  note      = {mueller2010mm},
  timestamp = {2010.09.01},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/mueller2010mm.pdf},
}
A. Müller, A. S. Shirazi, F. Alt, and A. Schmidt. ZoneTrak: Design and Implementation of an Emergency Management Assistance System. In Adjunct proceedings of the eighth international conference on pervasive computing (Pervasive’10), Helsinki, Finland, 2010.
[BibTeX] [Abstract] [PDF]
Though pervasive computing technologies are omnipresent in our daily lives, emergency cases, such as earthquakes, or fires often cause serious damages to the underlying infrastructure. In such cases rescue units rely on paper maps of the operation areas and important information are broadcasted either from a central unit or from other teams. This information is manually updated on the paper map, not only causing a lot of work but also being a potential source for errors. In this research we implemented a system that provides a positioning system to track forces and allows sharing information in real-time. Rescue units can annotate different zones and broadcast data to other units whose maps are automatically updated with available annotations. We show how such a system can be operated based on an independent infrastructure which makes it robust and reliable in emergency and catastrophe situations.
@InProceedings{mueller2010pevasiveadj,
author = {Alexander M\"{u}ller and Alireza Sahami Shirazi and Florian Alt and Albrecht Schmidt},
title = {{ZoneTrak: Design and Implementation of an Emergency Management Assistance System}},
booktitle = {Adjunct Proceedings of the Eighth International Conference on Pervasive Computing},
year = {2010},
series = {Pervasive'10},
note = {mueller2010pevasiveadj},
abstract = {Though pervasive computing technologies are omnipresent in our daily lives, emergency cases, such as earthquakes, or fires often cause serious damages to the underlying infrastructure. In such cases rescue units rely on paper maps of the operation areas and important information are broadcasted either from a central unit or from other teams. This information is manually updated on the paper map, not only causing a lot of work but also being a potential source for errors. In this research we implemented a system that provides a positioning system to track forces and allows sharing information in real-time. Rescue units can annotate different zones and broadcast data to other units whose maps are automatically updated with available annotations. We show how such a system can be operated based on an independent infrastructure which makes it robust and reliable in emergency and catastrophe situations.},
owner = {flo},
timestamp = {2010.06.02},
}
F. Alt, A. S. Shirazi, S. Legien, A. Schmidt, and J. Mennenöh. Creating Meaningful Melodies from Text Messages. In Proceedings of the 2010 conference on new interfaces for musical expression (NIME’10), 2010, p. 63–68.
[BibTeX] [Abstract] [PDF]
Writing text messages (e.g. email, SMS, instant messaging) is a popular form of synchronous and asynchronous communication. However, when it comes to notifying users about new messages, current audio-based approaches, such as notification tones, are very limited in conveying information. In this paper we show how entire text messages can be encoded into a meaningful and euphonic melody in such a way that users can guess a message’s intention without actually seeing the content. First, as a proof of concept, we report on the findings of an initial online survey among 37 musicians and 32 non-musicians evaluating the feasibility and validity of our approach. We show that our representation is understandable and that there are no significant differences between musicians and non-musicians. Second, we evaluated the approach in a real world scenario based on a Skype plug-in. In a field study with 14 participants we showed that sonified text messages strongly impact on the users’ message checking behavior by significantly reducing the time between receiving and reading an incoming message.
@InProceedings{alt2010nime,
author = {Alt, F. and Shirazi, A. S. and Legien, S. and Schmidt, A. and Mennen{\"o}h, J.},
title = {{Creating Meaningful Melodies from Text Messages}},
booktitle = {Proceedings of the 2010 Conference on New Interfaces for Musical Expression},
year = {2010},
series = {NIME'10},
pages = {63--68},
month = jun,
note = {alt2010nime},
abstract = {Writing text messages (e.g. email, SMS, instant messaging) is a popular form of synchronous and asynchronous communication. However, when it comes to notifying users about new messages, current audio-based approaches, such as notification tones, are very limited in conveying information. In this paper we show how entire text messages can be encoded into a meaningful and euphonic melody in such a way that users can guess a message’s intention without actually seeing the content. First, as a proof of concept, we report on the findings of an initial online survey among 37 musicians and 32 non-musicians evaluating the feasibility and validity of our approach. We show that our representation is understandable and that there are no significant differences between musicians and non-musicians. Second, we evaluated the approach in a real world scenario based on a Skype plug-in. In a field study with 14 participants we showed that sonified text messages strongly impact on the users’ message checking behavior by significantly reducing the time between receiving and reading an incoming message.},
timestamp = {2010.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2010nime.pdf},
}
G. Beyer, F. Alt, S. Klose, K. Isakovic, A. S. Shirazi, and A. Schmidt. Design Space for Large Cylindrical Screens. In Proceedings of the third international workshop on pervasive advertising and shopping (PerAd’10), Helsinki, Finland, 2010.
[BibTeX] [Abstract] [PDF]
The era of modern cylindrical screens, so-called advertising columns, began in the middle of the 19th century. Even nowadays they are still a popular advertising medium, which integrates well with urban environments. With advances in display technologies (LEDs, projectors) digital forms of such columns emerge and enable novel forms of visualization and interaction, which significantly differ from flat, rectangular screens due to the round shape. In this paper we present the design space for large cylindrical screens and outline design principles based on observations and experiments with a prototype of a digital column. We especially focus on the differences with flat, rectangular displays and report on challenges related to the deployment and development of applications for cylindrical screens.
@InProceedings{beyer2010perad,
author = {Gilbert Beyer and Florian Alt and Stefan Klose and Karsten Isakovic and Alireza Sahami Shirazi and Albrecht Schmidt},
title = {{Design Space for Large Cylindrical Screens}},
booktitle = {Proceedings of the Third International Workshop on Pervasive Advertising and Shopping},
year = {2010},
month = jun,
note = {beyer2010perad},
abstract = {The era of modern cylindrical screens, so-called advertising columns, began in the middle of the 19th century. Even nowadays they are still a popular advertising medium, which integrates well with urban environments. With advances in display technologies (LEDs, projectors) digital forms of such columns emerge and enable novel forms of visualization and interaction, which significantly differ from flat, rectangular screens due to the round shape. In this paper we present the design space for large cylindrical screens and outline design principles based on observations and experiments with a prototype of a digital column. We especially focus on the differences with flat, rectangular displays and report on challenges related to the deployment and development of applications for cylindrical screens.},
owner = {flo},
timestamp = {2010.06.01},
}
J. Mennenöh, S. Kristes, F. Alt, A. S. Shirazi, A. Schmidt, and H. Schröder. Customer Touchpoints im stationären Einzelhandel – Potenzial von Pervasive Computing. Marketing review st. gallen, vol. 27, iss. 2, p. 37–42, 2010.
[BibTeX] [Abstract] [PDF]
Je mehr individuelle Leistungen die Kunden verlangen, desto mehr Informationen benötigen die Anbieter. Während die Händler im Distanzgeschäft über personalisierte Daten ihrer Kunden und vor allem im Online-Shop über Bewegungsdaten verfügen, hat der stationäre Einzelhandel noch erhebliche Datenlücken. Diese Lücken kann man mit einer Pervasive-Computing-Umgebung schließen. Neue Customer Touchpoints liefern Informationen darüber, wer bei ihm einkauft und wie der Einkauf durchgeführt wird.
@Article{mennenoeh2010mrsg,
author = {Mennen\"{o}h, Julian and Kristes, Stefanie and Alt, Florian and Shirazi, Alireza Sahami and Schmidt, Albrecht and Schr\"{o}der, Hendrik},
title = {{Customer Touchpoints im station\"{a}ren Einzelhandel -- Potenzial von Pervasive Computing}},
journal = {Marketing Review St. Gallen},
year = {2010},
volume = {27},
number = {2},
pages = {37--42},
note = {mennenoeh2010mrsg},
abstract = {Je mehr individuelle Leistungen die Kunden verlangen, desto mehr Informationen benötigen die Anbieter. Während die Händler im Distanzgeschäft über personalisierte Daten ihrer Kunden und vor allem im Online-Shop über Bewegungsdaten verfügen, hat der stationäre Einzelhandel noch erhebliche Datenlücken. Diese Lücken kann man mit einer Pervasive-Computing-Umgebung schließen. Neue Customer Touchpoints liefern Informationen darüber, wer bei ihm einkauft und wie der Einkauf durchgeführt wird.},
publisher = {Springer},
timestamp = {2010.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mennenoeh2010mrsg.pdf},
}
J. D. H. Ramos, A. Tabard, and F. Alt. Contextual-Analysis for Infrastructure Awareness Systems. In Proceedings of the chi workshop “bridging the gap: moving from contextual analysis to design”, Atlanta, GA, USA, 2010.
[BibTeX] [PDF]
@InProceedings{ramos2010chiws,
author = {Juan David Hincapi{\'e} Ramos and Aurelien Tabard and Florian Alt},
title = {{Contextual-Analysis for Infrastructure Awareness Systems}},
booktitle = {Proceedings of the CHI Workshop ``Bridging the Gap: Moving from Contextual Analysis to Design''},
year = {2010},
note = {ramos2010chiws},
owner = {flo},
timestamp = {2010.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/ramos2010chiws.pdf},
}
I. Reif, F. Alt, J. D. Hincapié Ramos, K. Poteriaykina, and J. Wagner. Cleanly: Trashducation Urban System. In Chi ’10 extended abstracts on human factors in computing systems (CHI EA’10), ACM, New York, NY, USA, 2010, p. 3511–3516. doi:10.1145/1753846.1754010
[BibTeX] [Abstract] [PDF]
Half the world’s population is expected to live in urban areas by 2020. The high human density and changes in peoples’ consumption habits result in an ever-increasing amount of trash that must be handled by governing bodies. Problems created by inefficient or dysfunctional cleaning services are exacerbated by a poor personal trash management culture. In this paper we present Cleanly, an urban trashducation system aimed at creating awareness of garbage production and management, which may serve as an educational plat-form in the urban environment. We report on data collected from an online survey, which not only motivates our research but also provides useful information on reasons and possible solutions for trash problems.
@InProceedings{reif2010chiea,
  author    = {Reif, Inbal and Alt, Florian and Hincapi{\'e} Ramos, Juan David and Poteriaykina, Katerina and Wagner, Johannes},
  title     = {{Cleanly: Trashducation Urban System}},
  booktitle = {CHI '10 Extended Abstracts on Human Factors in Computing Systems},
  series    = {CHI EA'10},
  year      = {2010},
  pages     = {3511--3516},
  publisher = {ACM},
  address   = {New York, NY, USA},
  location  = {Atlanta, Georgia, USA},
  numpages  = {6},
  isbn      = {978-1-60558-930-5},
  doi       = {10.1145/1753846.1754010},
  acmid     = {1754010},
  keywords  = {design, interaction, public displays, recycling, rfid badges, trashducation, ubiquitous display environments},
  abstract  = {Half the world's population is expected to live in urban areas by 2020. The high human density and changes in peoples' consumption habits result in an ever-increasing amount of trash that must be handled by governing bodies. Problems created by inefficient or dysfunctional cleaning services are exacerbated by a poor personal trash management culture. In this paper we present Cleanly, an urban trashducation system aimed at creating awareness of garbage production and management, which may serve as an educational plat-form in the urban environment. We report on data collected from an online survey, which not only motivates our research but also provides useful information on reasons and possible solutions for trash problems.},
  note      = {reif2010chiea},
  timestamp = {2010.05.01},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/reif2010chiea.pdf},
}
F. Alt, A. S. Shirazi, A. Kaiser, K. Pfeuffer, E. Gurkan, A. Schmidt, P. Holleis, and M. Wagner. Exploring Ambient Visualizations of Context Information. In Adjunct proceedings of the eighth annual ieee international conference on pervasive computing and communications (PerCom’10), IEEE, Mannheim, Germany, 2010, pp. 788-791.
[BibTeX] [Abstract] [PDF]
In this paper we investigate how ambient displays can be used to share context information. Currently, many personal devices provide context information, such as location or activity, and at the same time the number of ambient displays is increasing. We developed two prototypes for visualizing contextual information and initially explored the suitability of these in an online study. Additionally, we investigated which parameters are important for users when sharing personal context. Based on our findings we discuss guidelines for the design of ambient displays for context sharing.
@InProceedings{alt2010percomadj,
author = {Florian Alt and Alireza Sahami Shirazi and Andreas Kaiser and Ken Pfeuffer and Emre Gurkan and Albrecht Schmidt and Paul Holleis and Matthias Wagner},
title = {{Exploring Ambient Visualizations of Context Information}},
booktitle = {Adjunct Proceedings of the Eighth Annual IEEE International Conference on Pervasive Computing and Communications},
year = {2010},
series = {PerCom'10},
pages = {788--791},
month = apr,
publisher = {IEEE},
note = {alt2010percomadj},
abstract = {In this paper we investigate how ambient displays can be used to share context information. Currently, many personal devices provide context information, such as location or activity, and at the same time the number of ambient displays is increasing. We developed two prototypes for visualizing contextual information and initially explored the suitability of these in an online study. Additionally, we investigated which parameters are important for users when sharing personal context. Based on our findings we discuss guidelines for the design of ambient displays for context sharing.},
bibsource = {DBLP, http://dblp.uni-trier.de},
doi = {10.1109/PERCOMW.2010.5470542},
timestamp = {2010.04.01},
}
A. S. Shirazi, T. Kubitza, F. Alt, B. Pfleging, and A. Schmidt. WEtransport: A Context-based Ride Sharing Platform. In Adjunct proceedings of the twelfth international conference on ubiquitous computing (Ubicomp’10), Copenhagen, Denmark, 2010.
[BibTeX] [Abstract] [PDF]
In densely populated urban areas high amounts of traffic pose a major problem, which affects the environment, economy, and our lives. From a user’s perspective, the main issues include delays due to traffic jams, lack of parking space and high costs due to increasing fuel prices (e.g., if commuting long distances). Collective transportation (CT), e.g., public transport systems, provides a partly solution to these issues. Yet, CT does not support door-to-door transportation hence reducing convenience; it might be limited in off-peak hours, and it is still a cost factor when travelling long distances. A solution to these issues is ride sharing, an evolving form of CT making alternative transportation more affordable. In this paper we present a modular, context-aware ride sharing platform. We aim at enhancing convenience, reliability, and affordability of different forms of ride sharing by means of context data. Additionally our approach supports an easy server- and client-side expansion due to the modular platform structure.
@InProceedings{sahami2010ubicompadj,
author = {Alireza Sahami Shirazi and Thomas Kubitza and Florian Alt and Bastian Pfleging and Albrecht Schmidt},
title = {{WEtransport: A Context-based Ride Sharing Platform}},
booktitle = {Adjunct Proceedings of the Twelfth International Conference on Ubiquitous Computing},
year = {2010},
series = {Ubicomp'10},
note = {sahami2010ubicompadj},
abstract = {In densely populated urban areas high amounts of traffic pose a major problem, which affects the environment, economy, and our lives. From a user’s perspective, the main issues include delays due to traffic jams, lack of parking space and high costs due to increasing fuel prices (e.g., if commuting long distances). Collective transportation (CT), e.g., public transport systems, provides a partly solution to these issues. Yet, CT does not support door-to-door transportation hence reducing convenience; it might be limited in off-peak hours, and it is still a cost factor when travelling long distances. A solution to these issues is ride sharing, an evolving form of CT making alternative transportation more affordable. In this paper we present a modular, context-aware ride sharing platform. We aim at enhancing convenience, reliability, and affordability of different forms of ride sharing by means of context data. Additionally our approach supports an easy server- and client-side expansion due to the modular platform structure.},
owner = {flo},
timestamp = {2010.03.01},
}
A. Sahami Shirazi, A. Sarjanoja, F. Alt, A. Schmidt, and J. Häkkilä. Understanding the Impact of Abstracted Audio Preview of SMS. In Proceedings of the 28th international conference on human factors in computing systems (CHI’10), ACM, New York, NY, USA, 2010, p. 1735–1738. doi:10.1145/1753326.1753585
[BibTeX] [Abstract] [PDF]
Despite the availability of other mobile messaging applications, SMS has kept its position as a heavily used communication technology. However, there are many situations in which it is inconvenient or inappropriate to check a message’s content immediately. In this paper, we introduce the concept of audio previews of SMS. Based on a real-time analysis of the content of a message, we provide auditory cues in addition to the notification tone upon receiving an SMS. We report on a field trial with 20 participants and show that the use of audio-enhanced SMS affects the reading and writing behavior of users. Our work is motivated by the results of an online survey among 347 SMS users of whose we analyzed 3400 text messages.
@InProceedings{sahami2010chi,
  author    = {Sahami Shirazi, Alireza and Sarjanoja, Ari-Heikki and Alt, Florian and Schmidt, Albrecht and H\"{a}kkil\"{a}, Jonna},
  title     = {{Understanding the Impact of Abstracted Audio Preview of SMS}},
  booktitle = {Proceedings of the 28th International Conference on Human Factors in Computing Systems},
  series    = {CHI'10},
  year      = {2010},
  pages     = {1735--1738},
  publisher = {ACM},
  address   = {New York, NY, USA},
  location  = {Atlanta, Georgia, USA},
  numpages  = {4},
  isbn      = {978-1-60558-929-9},
  doi       = {10.1145/1753326.1753585},
  acmid     = {1753585},
  keywords  = {auditory ui, emoticon, mobile phone, sms, user studies},
  abstract  = {Despite the availability of other mobile messaging applications, SMS has kept its position as a heavily used communication technology. However, there are many situations in which it is inconvenient or inappropriate to check a message's content immediately. In this paper, we introduce the concept of audio previews of SMS. Based on a real-time analysis of the content of a message, we provide auditory cues in addition to the notification tone upon receiving an SMS. We report on a field trial with 20 participants and show that the use of audio-enhanced SMS affects the reading and writing behavior of users. Our work is motivated by the results of an online survey among 347 SMS users of whose we analyzed 3400 text messages.},
  note      = {sahami2010chi},
  timestamp = {2010.01.01},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/sahami2010chi.pdf},
}

### 2009

F. Alt, M. Balz, S. Kristes, A. S. Shirazi, J. Mennenöh, A. Schmidt, H. Schröder, and M. Gödicke. Adaptive User Profiles in Pervasive Advertising Environments. In Proceedings of the european conference on ambient intelligence (AmI’09), Springer-Verlag, Berlin, Heidelberg, 2009, p. 276–286. doi:10.1007/978-3-642-05408-2_32
[BibTeX] [Abstract] [PDF]
Nowadays modern advertising environments try to provide more efficient ads by targeting costumers based on their interests. Various approaches exist today as to how information about the users’ interests can be gathered. Users can deliberately and explicitly provide this information or user’s shopping behaviors can be analyzed implicitly. We implemented an advertising platform to simulate an advertising environment and present adaptive profiles, which let users setup profiles based on a self-assessment, and enhance those profiles with information about their real shopping behavior as well as about their activity intensity. Additionally, we explain how pervasive technologies such as Bluetooth can be used to create a profile anonymously and unobtrusively.
@InProceedings{alt2009ami,
author = {Alt, Florian and Balz, Moritz and Kristes, Stefanie and Shirazi, Alireza Sahami and Mennen\"{o}h, Julian and Schmidt, Albrecht and Schr\"{o}der, Hendrik and G\"{o}dicke, Michael},
title = {{Adaptive User Profiles in Pervasive Advertising Environments}},
booktitle = {Proceedings of the European Conference on Ambient Intelligence},
year = {2009},
series = {AmI'09},
pages = {276--286},
month = nov,
address = {Berlin, Heidelberg},
publisher = {Springer-Verlag},
note = {alt2009ami},
abstract = {Nowadays modern advertising environments try to provide more efficient ads by targeting costumers based on their interests. Various approaches exist today as to how information about the users’ interests can be gathered. Users can deliberately and explicitly provide this information or user’s shopping behaviors can be analyzed implicitly. We implemented an advertising platform to simulate an advertising environment and present adaptive profiles, which let users setup profiles based on a self-assessment, and enhance those profiles with information about their real shopping behavior as well as about their activity intensity. Additionally, we explain how pervasive technologies such as Bluetooth can be used to create a profile anonymously and unobtrusively.},
acmid = {1694666},
doi = {10.1007/978-3-642-05408-2_32},
isbn = {978-3-642-05407-5},
location = {Salzburg, Austria},
numpages = {11},
timestamp = {2009.11.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2009ami.pdf},
}
F. Alt, A. S. Shirazi, M. Pfeiffer, P. Holleis, and A. Schmidt. TaxiMedia: An Interactive Context-Aware Entertainment and Advertising System. In Proceedings of the second international workshop on pervasive advertising (PerAd’09), Lübeck, Germany, 2009.
[BibTeX] [Abstract] [PDF]
@InProceedings{alt2009perad2,
author = {Florian Alt AND Alireza Sahami Shirazi AND Max Pfeiffer AND Paul Holleis AND Albrecht Schmidt},
title = {{TaxiMedia: An Interactive Context-Aware Entertainment and Advertising System}},
booktitle = {Proceedings of the Second International Workshop on Pervasive Advertising},
year = {2009},
series = {PerAd'09},
address = {L{\"u}beck, Germany},
month = oct,
note = {alt2009perad2},
owner = {flo},
timestamp = {2009.10.01},
}
F. Alt, A. Schmidt, R. Atterer, and P. Holleis. Bringing Web 2.0 to the Old Web: A Platform for Parasitic Applications. In Proceedings of the 12th ifip tc 13 international conference on human-computer interaction: part i (INTERACT’09), Springer-Verlag, Berlin, Heidelberg, 2009, p. 405–418. doi:10.1007/978-3-642-03655-2_44
[BibTeX] [Abstract] [PDF]
It is possible to create interactive, responsive web applications that allow user-generated contributions. However, the relevant technologies have to be explicitly deployed by the authors of the web pages. In this work we present the concept of parasitic and symbiotic web applications which can be deployed on arbitrary web pages by means of a proxy-based application platform. Such applications are capable of inserting, editing and deleting the content of web pages. We use an HTTP proxy in order to insert JavaScript code on each web page that is delivered from the web server to the browser. Additionally we use a database server hosting user-generated scripts as well as high-level APIs allowing for implementing customized web applications. Our approach is capable of cooperating with existing web pages by using shared standards (e.g. formatting of the structure on DOM level) and common APIs but also allows for user-generated (parasitic) applications on arbitrary web pages without the need for cooperation by the page owner.
@InProceedings{alt2009interact,
author = {Alt, Florian and Schmidt, Albrecht and Atterer, Richard and Holleis, Paul},
title = {{Bringing Web 2.0 to the Old Web: A Platform for Parasitic Applications}},
booktitle = {Proceedings of the 12th IFIP TC 13 International Conference on Human-Computer Interaction: Part I},
year = {2009},
series = {INTERACT'09},
pages = {405--418},
month = sep,
address = {Berlin, Heidelberg},
publisher = {Springer-Verlag},
note = {alt2009interact},
abstract = {It is possible to create interactive, responsive web applications that allow user-generated contributions. However, the relevant technologies have to be explicitly deployed by the authors of the web pages. In this work we present the concept of parasitic and symbiotic web applications which can be deployed on arbitrary web pages by means of a proxy-based application platform. Such applications are capable of inserting, editing and deleting the content of web pages. We use an HTTP proxy in order to insert JavaScript code on each web page that is delivered from the web server to the browser. Additionally we use a database server hosting user-generated scripts as well as high-level APIs allowing for implementing customized web applications. Our approach is capable of cooperating with existing web pages by using shared standards (e.g. formatting of the structure on DOM level) and common APIs but also allows for user-generated (parasitic) applications on arbitrary web pages without the need for cooperation by the page owner.},
acmid = {1615858},
doi = {10.1007/978-3-642-03655-2_44},
isbn = {978-3-642-03654-5},
location = {Uppsala, Sweden},
numpages = {14},
timestamp = {2009.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2009interact.pdf},
}
A. S. Shirazi, F. Alt, A. Schmidt, A. Sarjanoja, L. Hynninen, J. Häkkilä, and P. Holleis. Emotion Sharing Via Self-Composed Melodies on Mobile Phones. In Proceedings of the 11th international conference on human-computer interaction with mobile devices and services (MobileHCI’09), ACM, New York, NY, USA, 2009, p. 301–304. doi:10.1145/1613858.1613897
[BibTeX] [Abstract] [PDF]
In their role as personal communication devices, mobile phones are a natural choice for sharing and communicating emotions. However, their functionalities are currently very limited in power to express affective messages. In this paper, we describe the design of a system that allows users to easily compose melodies and share them via mobile phones. We show that by using these melodies information about the current emotional state of the sender can be expressed and recognized synchronously by the receiver in a simple, quick, and unobtrusive way. Further, we reveal that self-composed melodies have a stronger impact than pre-composed or downloaded messages, similar to crafted pieces of art offered to a beloved person. We then present findings from a user study that assesses the implementation of a functional prototype and the adequacy of the system for emotional communication.
@inproceedings{sahami2009mobilehci,
  author    = {Shirazi, Alireza Sahami and Alt, Florian and Schmidt, Albrecht and Sarjanoja, Ari-Heikki and Hynninen, Lotta and H\"{a}kkil\"{a}, Jonna and Holleis, Paul},
  title     = {{Emotion Sharing Via Self-Composed Melodies on Mobile Phones}},
  booktitle = {Proceedings of the 11th International Conference on Human-Computer Interaction with Mobile Devices and Services},
  series    = {MobileHCI'09},
  year      = {2009},
  pages     = {301--304},
  articleno = {30},
  numpages  = {4},
  publisher = {ACM},
  address   = {New York, NY, USA},
  location  = {Bonn, Germany},
  doi       = {10.1145/1613858.1613897},
  isbn      = {978-1-60558-281-8},
  acmid     = {1613897},
  keywords  = {composer, emotion sharing, mobile phone, synchronous},
  note      = {sahami2009mobilehci},
  abstract  = {In their role as personal communication devices, mobile phones are a natural choice for sharing and communicating emotions. However, their functionalities are currently very limited in power to express affective messages. In this paper, we describe the design of a system that allows users to easily compose melodies and share them via mobile phones. We show that by using these melodies information about the current emotional state of the sender can be expressed and recognized synchronously by the receiver in a simple, quick, and unobtrusive way. Further, we reveal that self-composed melodies have a stronger impact than pre-composed or downloaded messages, similar to crafted pieces of art offered to a beloved person. We then present findings from a user study that assesses the implementation of a functional prototype and the adequacy of the system for emotional communication.},
  timestamp = {2009.09.01},
  url       = {http://www.florian-alt.org/unibw/wp-content/publications/sahami2009mobilehci.pdf},
}
F. Alt, A. Schmidt, and C. Evers. Mobile Contextual Displays. In Proceedings of the first international workshop on pervasive advertising (PerAd’09), Nara, Japan, 2009.
[BibTeX] [PDF]
@InProceedings{alt2009perad1,
author = {Florian Alt AND Albrecht Schmidt AND Christoph Evers},
title = {{Mobile Contextual Displays}},
booktitle = {Proceedings of the First International Workshop on Pervasive Advertising},
year = {2009},
series = {PerAd'09},
address = {Nara, Japan},
month = jun,
note = {alt2009perad1},
owner = {flo},
timestamp = {2009.06.01},
}
F. Alt, C. Evers, and A. Schmidt. Users’ View on Context-Sensitive Car Advertisements. In Proceedings of the 7th international conference on pervasive computing (Pervasive’09), Springer-Verlag, Berlin, Heidelberg, 2009, p. 9–16. doi:10.1007/978-3-642-01516-8_2
[BibTeX] [Abstract] [PDF]
@InProceedings{alt2009pervasive,
author = {Alt, Florian and Evers, Christoph and Schmidt, Albrecht},
title = {{Users' View on Context-Sensitive Car Advertisements}},
booktitle = {Proceedings of the 7th International Conference on Pervasive Computing},
year = {2009},
series = {Pervasive'09},
pages = {9--16},
month = jun,
address = {Berlin, Heidelberg},
publisher = {Springer-Verlag},
note = {alt2009pervasive},
acmid = {1560007},
doi = {10.1007/978-3-642-01516-8_2},
isbn = {978-3-642-01515-1},
location = {Nara, Japan},
numpages = {8},
timestamp = {2009.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2009pervasive.pdf},
}
F. Alt, C. Evers, and A. Schmidt. Mobile Public Display Systems. In Adjunct proceedings of the tenth workshop on mobile computing, systems, and applications (HotMobile’09), Santa Cruz, CA, USA, 2009.
[BibTeX] [PDF]
@InProceedings{alt2009hotmobileadj,
author = {Florian Alt AND Christoph Evers AND Albrecht Schmidt},
title = {{Mobile Public Display Systems}},
booktitle = {Adjunct Proceedings of the Tenth Workshop on Mobile Computing, Systems, and Applications},
year = {2009},
series = {HotMobile'09},
address = {Santa Cruz, CA, USA},
month = jun,
note = {alt2009hotmobileadj},
owner = {flo},
timestamp = {2009.06.01},
}

### 2008

A. Schmidt, F. Alt, P. Holleis, J. Mueller, and A. Krueger. Creating Log Files and Click Streams for Advertisements in Physical Space. In Adjunct proceedings of the 10th international conference on ubiquitous computing (Ubicomp’08), Seoul, South Korea, 2008, p. 28–29.
[BibTeX] [Abstract] [PDF]
Poster advertisement has a long tradition and is transformingrapidly into digital media. In this paper we provide anoverview of how sensing can be used to create online andup to date information about potential viewers. We assesswhat application domains can benefit from continuousmonitoring of visitors. As measuring with simple sensors isinherently error prone we suggest the notion of comparativeadvertising power which compares the number of potentialviewers in different locations. We address user acceptanceand privacy concerns and show technical mechanism toincrease privacy.
@InProceedings{schmidt2008ubicompadj,
author = {Schmidt, A. and Alt, F. and Holleis, P. and Mueller, J. and Krueger, A.},
title = {{Creating Log Files and Click Streams for Advertisements in Physical Space}},
booktitle = {Adjunct Proceedings of the 10th International Conference on Ubiquitous Computing},
year = {2008},
series = {Ubicomp'08},
pages = {28--29},
note = {schmidt2008ubicompadj},
abstract = {Poster advertisement has a long tradition and is transformingrapidly into digital media. In this paper we provide anoverview of how sensing can be used to create online andup to date information about potential viewers. We assesswhat application domains can benefit from continuousmonitoring of visitors. As measuring with simple sensors isinherently error prone we suggest the notion of comparativeadvertising power which compares the number of potentialviewers in different locations. We address user acceptanceand privacy concerns and show technical mechanism toincrease privacy.},
location = {Seoul, South Korea},
timestamp = {2008.10.01},
}

### 2007

F. Alt, A. Sahami Shirazi, and A. Schmidt. Monitoring Heartbeat per Day to Motivate Increasing Physical Activity. In Proceedings of the ubicomp workshop on interaction with ubiquitous wellness and healthcare applications (UbiWell’07), Innsbruck, Austria, 2007.
[BibTeX] [Abstract] [PDF]
Physical activity is one of the most basic humanfunctions and essential for physical and mental health andhas major beneficial effect on chronic diseases such asheart disease, stroke, etc. Awareness of this condition hasbeen one of the focuses for researching over recent years.One common way for making this awareness is monitoringthe number of steps taken by a person and comparing itwith the minimum amount of steps s/he needs. In thispaper we suggest that instead of this, heartbeat can bemonitored to aware physical activity.
@InProceedings{alt2008ubicompadj,
author = {Alt, Florian AND Sahami Shirazi, Alireza AND Schmidt, Albrecht},
title = {{Monitoring Heartbeat per Day to Motivate Increasing Physical Activity}},
booktitle = {Proceedings of the Ubicomp Workshop on Interaction with Ubiquitous Wellness and Healthcare Applications},
year = {2007},
series = {UbiWell'07},
address = {Innsbruck, Austria},
note = {alt2008ubicompadj},
abstract = {Physical activity is one of the most basic humanfunctions and essential for physical and mental health andhas major beneficial effect on chronic diseases such asheart disease, stroke, etc. Awareness of this condition hasbeen one of the focuses for researching over recent years.One common way for making this awareness is monitoringthe number of steps taken by a person and comparing itwith the minimum amount of steps s/he needs. In thispaper we suggest that instead of this, heartbeat can bemonitored to aware physical activity.},
owner = {flo},
timestamp = {2007.03.01},
}

### 2006

A. Schmidt, F. Alt, D. Wilhelm, J. Niggemann, and H. Feussner. Experimenting with Ubiquitous Computing Technologies in Productive Environments. Elektrotechnik und informationstechnik, vol. 123, iss. 4, pp. 135–139, 2006.
[BibTeX] [Abstract] [PDF]
Ubiquitous computing techniques are ideal tools to bring new solutions to environments which are otherwise quite resistant to rapidchange. In this paper we present techniques to carry out experiments in the very heterogeneous environment of a hospital’s decisionmaking conference, the ‘‘tumour board’’. Introducing the concept of surface interaction we demonstrate how information from varioussources such as X-ray film, slide presentations and projections of CT scans together with oral comments and typed notes can be capturedand made available for surgeons’ use in the operating theatre, without interfering with the ‘‘old’’ way of holding the meeting and withoutputting any extra burden on the hospital staff.
@Article{schmidt2006elektrotechnik,
author = {Albrecht Schmidt and Florian Alt and Dirk Wilhelm and J{\"o}rg Niggemann and Hubertus Feussner},
title = {{Experimenting with Ubiquitous Computing Technologies in Productive Environments}},
journal = {Elektrotechnik und Informationstechnik},
year = {2006},
volume = {123},
number = {4},
pages = {135--139},
note = {schmidt2006elektrotechnik},
abstract = {Ubiquitous computing techniques are ideal tools to bring new solutions to environments which are otherwise quite resistant to rapidchange. In this paper we present techniques to carry out experiments in the very heterogeneous environment of a hospital’s decisionmaking conference, the ‘‘tumour board’’. Introducing the concept of surface interaction we demonstrate how information from varioussources such as X-ray film, slide presentations and projections of CT scans together with oral comments and typed notes can be capturedand made available for surgeons’ use in the operating theatre, without interfering with the ‘‘old’’ way of holding the meeting and withoutputting any extra burden on the hospital staff.},
timestamp = {2006.10.19},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schmidt2006elektrotechnik.pdf},
}

### 2017

F. Hartmann, Time-constrained access control for mobile devices, 2017.
[BibTeX] [Abstract]
In this thesis, a novel concept to unlock smartphones was elaborated. It enables an alternative time-constrainedsession on the smartphone for short access. Before the concept was developed, existing research about smartphone usage,unlock behaviors and non-standard unlock methods was explored. The final concept was implemented as an installable Androidapplication afterwards. The prototype application called SnapApp combines the two already known unlock methods PIN andslide-to-unlock in one lockscreen. The user can decide to either get full access by prompting PIN or to get constrained shortaccess by using slide-to-unlock. A longitudinal field study was conducted by installing the prototype on the smartphones of18 participants, who tested the new lockscreen for a duration of 30 days. Results revealed that SnapApp was able to reducePIN prompts by 20% in total, which also saved valuable time. The security was not impaired, as the majority of the usershas individually configured the maximum session lengths, the expirations of available short sessions and the blacklists, whichcontain apps protected of usage during short access. This was also confirmed by the feedback questionnaires of the study.SnapApp can be adapted to different user needs and was thereby equally accepted by PIN, pattern and swipe users. Besides,the prototype requires no further hardware or sensors and can thus be installed on any Android smartphone.
@Misc{hartmann2017lmu,
author = {Fabian Hartmann},
title = {Time-constrained access control for mobile devices},
howpublished = {LMU M{\"u}nchen},
year = {2017},
note = {hartmann2017lmu},
abstract = {In this thesis, a novel concept to unlock smartphones was elaborated. It enables an alternative time-constrainedsession on the smartphone for short access. Before the concept was developed, existing research about smartphone usage,unlock behaviors and non-standard unlock methods was explored. The final concept was implemented as an installable Androidapplication afterwards. The prototype application called SnapApp combines the two already known unlock methods PIN andslide-to-unlock in one lockscreen. The user can decide to either get full access by prompting PIN or to get constrained shortaccess by using slide-to-unlock. A longitudinal field study was conducted by installing the prototype on the smartphones of18 participants, who tested the new lockscreen for a duration of 30 days. Results revealed that SnapApp was able to reducePIN prompts by 20% in total, which also saved valuable time. The security was not impaired, as the majority of the usershas individually configured the maximum session lengths, the expirations of available short sessions and the blacklists, whichcontain apps protected of usage during short access. This was also confirmed by the feedback questionnaires of the study.SnapApp can be adapted to different user needs and was thereby equally accepted by PIN, pattern and swipe users. Besides,the prototype requires no further hardware or sensors and can thus be installed on any Android smartphone.},
owner = {florian},
school = {LMU M{\"u}nchen},
timestamp = {2019.04.23},
}