
Publications

2019

M. Braun, A. Mainz, R. Chadowitz, B. Pfleging, and F. Alt, “At Your Service: Designing Voice Assistant Personalities to Improve Automotive User Interfaces,” in Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems, New York, NY, USA, 2019.
[BibTeX] [Abstract] [Download PDF]
This paper investigates personalized voice characters for in-car speech interfaces. In particular, we report on how we designed different personalities for voice assistants and compared them in a real-world driving study. Voice assistants have become important for a wide range of use cases, yet current interfaces are using the same style of auditory response in every situation, despite varying user needs and personalities. To close this gap, we designed four assistant personalities (Friend, Admirer, Aunt, and Butler) and compared them to a baseline (Default) in a between-subject study in real traffic conditions. Our results show higher likability and trust for assistants that correctly match the user's personality, while we observed lower likability, trust, satisfaction, and usefulness for incorrectly matched personalities, each in comparison with the Default character. We discuss design aspects for voice assistants in different automotive use cases.
@InProceedings{braun2019chi,
author = {Michael Braun AND Anja Mainz AND Ronee Chadowitz AND Bastian Pfleging AND Florian Alt},
title = {{At Your Service: Designing Voice Assistant Personalities to Improve Automotive User Interfaces}},
booktitle = {{Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems}},
year = {2019},
series = {CHI '19},
address = {New York, NY, USA},
publisher = {ACM},
note = {braun2019chi},
abstract = {This paper investigates personalized voice characters for in-car speech interfaces. In particular, we report on how we designed different personalities for voice assistants and compared them in a real-world driving study. Voice assistants have become important for a wide range of use cases, yet current interfaces are using the same style of auditory response in every situation, despite varying user needs and personalities. To close this gap, we designed four assistant personalities (Friend, Admirer, Aunt, and Butler) and compared them to a baseline (Default) in a between-subject study in real traffic conditions. Our results show higher likability and trust for assistants that correctly match the user's personality, while we observed lower likability, trust, satisfaction, and usefulness for incorrectly matched personalities, each in comparison with the Default character. We discuss design aspects for voice assistants in different automotive use cases.},
comment = {braun2019chi},
location = {Glasgow, UK},
numpages = {12},
timestamp = {2019.05.01},
url = {http://florian-alt.org/unibw/wp-content/publications/braun2019chi.pdf},
}
K. Pfeuffer, M. Geiger, S. Prange, L. Mecke, D. Buschek, and F. Alt, “Behavioural Biometrics in VR – Identifying People from Body Motion and Relations in Virtual Reality,” in Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems, New York, NY, USA, 2019.
[BibTeX] [Abstract] [Download PDF]
Every person is unique, with individual behavioural characteristics: how one moves, coordinates, and uses their body. In this paper we investigate body motion as behavioural biometrics for virtual reality. In particular, we look into which behaviour is suitable to identify a user. This is valuable in situations where multiple people use a virtual reality environment in parallel, for example in the context of authentication or to adapt the VR environment to users’ preferences. We present a user study (N=22) where people perform controlled VR tasks (pointing, grabbing, walking, typing), monitoring their head, hand, and eye motion data over two sessions. These body segments can be arbitrarily combined into body relations, and we found that these movements and their combination lead to characteristic behavioural patterns. We present an extensive analysis of which motion/relation is useful to identify users in which tasks using classification methods. Our findings are beneficial for researchers and practitioners alike who aim to build novel adaptive and secure user interfaces in virtual reality.
@InProceedings{pfeuffer2019chi,
author = {Ken Pfeuffer AND Matthias Geiger AND Sarah Prange AND Lukas Mecke AND Daniel Buschek AND Florian Alt},
title = {{Behavioural Biometrics in VR - Identifying People from Body Motion and Relations in Virtual Reality}},
booktitle = {{Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems}},
year = {2019},
series = {CHI '19},
address = {New York, NY, USA},
publisher = {ACM},
note = {pfeuffer2019chi},
abstract = {Every person is unique, with individual behavioural characteristics: how one moves, coordinates, and uses their body. In this paper we investigate body motion as behavioural biometrics for virtual reality. In particular, we look into which behaviour is suitable to identify a user. This is valuable in situations where multiple people use a virtual reality environment in parallel, for example in the context of authentication or to adapt the VR environment to users' preferences. We present a user study (N=22) where people perform controlled VR tasks (pointing, grabbing, walking, typing), monitoring their head, hand, and eye motion data over two sessions. These body segments can be arbitrarily combined into body relations, and we found that these movements and their combination lead to characteristic behavioural patterns. We present an extensive analysis of which motion/relation is useful to identify users in which tasks using classification methods. Our findings are beneficial for researchers and practitioners alike who aim to build novel adaptive and secure user interfaces in virtual reality.},
keywords = {Virtual Reality, Behavioural Biometrics, Motion, Relation, Proprioception, Adaptive UIs},
location = {Glasgow, UK},
numpages = {11},
timestamp = {2019.05.03},
url = {http://florian-alt.org/unibw/wp-content/publications/pfeuffer2019chi.pdf},
}
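The identification approach sketched in the abstract above (classifying users from per-trial motion features) lends itself to a compact illustration. The following Python sketch is hypothetical: the features, user-specific offsets, classifier choice, and all data are synthetic stand-ins, not the paper's actual pipeline.

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(0)

# Toy data: one row per trial, with per-trial motion features (standing in
# for head/hand/eye velocities and pairwise "body relations" such as
# head-hand distance).
n_users, trials_per_user, n_features = 22, 40, 12
X = rng.normal(size=(n_users * trials_per_user, n_features))
# Add user-specific offsets so each user has a characteristic pattern.
X += np.repeat(rng.normal(size=(n_users, n_features)), trials_per_user, axis=0)
y = np.repeat(np.arange(n_users), trials_per_user)  # user identity labels

# Cross-validated identification accuracy; chance level for 22 users is ~4.5%.
clf = RandomForestClassifier(n_estimators=100, random_state=0)
print("identification accuracy:", cross_val_score(clf, X, y, cv=5).mean())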
M. Braun and F. Alt, “Affective Assistants: a Matter of States and Traits,” in Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems, New York, NY, USA, 2019.
[BibTeX] [Abstract] [Download PDF]
This work presents a model for the development of affective assistants based on the pillars of user states and traits. Traits are defined as long-term qualities like personality, personal experiences, preferences, and demographics, while the user state comprises cognitive load, emotional states, and physiological parameters. We discuss useful input values and the necessary developments for an advancement of affective assistants with the example of an affective in-car voice assistant.
@InProceedings{braun2019chiea,
author = {Michael Braun AND Florian Alt},
title = {{Affective Assistants: a Matter of States and Traits}},
booktitle = {{Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems}},
year = {2019},
series = {CHI EA'19},
address = {New York, NY, USA},
publisher = {ACM},
note = {braun2019chiea},
abstract = {This work presents a model for the development of affective assistants based on the pillars of user states and traits. Traits are defined as long-term qualities like personality, personal experiences, preferences, and demographics, while the user state comprises cognitive load, emotional states, and physiological parameters. We discuss useful input values and the necessary developments for an advancement of affective assistants with the example of an affective in-car voice assistant.},
comment = {braun2019chiea},
location = {Glasgow, UK},
timestamp = {2019.05.03},
url = {http://florian-alt.org/unibw/wp-content/publications/braun2019chiea.pdf},
}
S. Faltaous, G. Haas, L. Barrios, A. Seiderer, S. F. Rauh, H. J. Chae, S. Schneegass, and F. Alt, “BrainShare: A Glimpse of Social Interaction for Locked-in Syndrome Patients,” in Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems, New York, NY, USA, 2019.
[BibTeX] [Download PDF]
@InProceedings{faltaous2019chiea,
author = {Sarah Faltaous AND Gabriel Haas AND Liliana Barrios AND Andreas Seiderer AND Sebastian Felix Rauh AND Han Joo Chae AND Stefan Schneegass AND Florian Alt},
title = {{BrainShare: A Glimpse of Social Interaction for Locked-in Syndrome Patients}},
booktitle = {{Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems}},
year = {2019},
series = {CHI EA'19},
address = {New York, NY, USA},
publisher = {ACM},
note = {faltaous2019chiea},
comment = {faltaous2019chiea},
location = {Glasgow, UK},
timestamp = {2019.05.01},
url = {http://florian-alt.org/unibw/wp-content/publications/faltaous2019chiea.pdf},
}
S. Prange, D. Buschek, K. Pfeuffer, L. Mecke, P. Ehrich, J. Le, and F. Alt, “Go for GOLD: Investigating User Behaviour in Goal-Oriented Tasks,” in Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems, New York, NY, USA, 2019.
[BibTeX] [Abstract] [Download PDF]
Building adaptive support systems requires a deep understanding of why users get stuck or face problems during a goal-oriented task and how they perceive such situations. To investigate this, we first chart a problem space, comprising different problem characteristics (complexity, time, available means, and consequences). Secondly, we map them to LEGO assembly tasks. We apply these in a lab study equipped with several tracking technologies (i.e., smartwatch sensors and an OptiTrack setup) to assess which problem characteristics lead to measurable consequences in user behaviour. Participants rated occurred problems after each task. With this work, we suggest first steps towards a) understanding user behaviour in problem situations and b) building upon this knowledge to inform the design of adaptive support systems. As a result, we provide the GOLD dataset (Goal-Oriented Lego Dataset) for further analysis.
@InProceedings{prange2019chiea,
author = {Sarah Prange AND Daniel Buschek AND Ken Pfeuffer AND Lukas Mecke AND Peter Ehrich AND Jens Le AND Florian Alt},
title = {{Go for GOLD: Investigating User Behaviour in Goal-Oriented Tasks}},
booktitle = {{Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems}},
year = {2019},
series = {CHI EA'19},
address = {New York, NY, USA},
publisher = {ACM},
note = {prange2019chiea},
abstract = {Building adaptive support systems requires a deep understanding of why users get stuck or face problems during a goal-oriented task and how they perceive such situations. To investigate this, we first chart a problem space, comprising different problem characteristics (complexity, time, available means, and consequences). Secondly, we map them to LEGO assembly tasks. We apply these in a lab study equipped with several tracking technologies (i.e., smartwatch sensors and an OptiTrack setup) to assess which problem characteristics lead to measurable consequences in user behaviour. Participants rated occurred problems after each task. With this work, we suggest first steps towards a) understanding user behaviour in problem situations and b) building upon this knowledge to inform the design of adaptive support systems. As a result, we provide the GOLD dataset (Goal-Oriented Lego Dataset) for further analysis.},
comment = {prange2019chiea},
location = {Glasgow, UK},
timestamp = {2019.05.01},
url = {http://florian-alt.org/unibw/wp-content/publications/prange2019chiea.pdf},
}
M. Braun, R. Chadowitz, and F. Alt, “User Experience of Driver State Visualizations: a Look at Demographics and Personalities,” in Proceedings of the 17th IFIP TC.13 International Conference on Human-Computer Interaction, Berlin-Heidelberg, Germany, 2019.
[BibTeX] [Abstract] [Download PDF]
Driver state detection is an emerging topic for automotive user interfaces. Motivated by the trend of self-tracking, one crucial question within this field is how or whether detected states should be displayed. In this work we investigate the impact of demographics and personality traits on the user experience of driver state visualizations. 328 participants experienced three concepts visualizing their current state in a publicly installed driving simulator. Driver age, experience, and personality traits were shown to have impact on visualization preferences. While a continuous display was generally preferred, older respondents and drivers with little experience favored a system with less visual elements. Extroverted participants were more open towards interventions. Our findings lead us to believe that, while users are generally open to driver state detection, its visualization should be adapted to age, driving experience, and personality. This work is meant to support professionals and researchers designing affective in-car information systems.
@InProceedings{braun2019interact1,
author = {Braun, Michael and Chadowitz, Ronee and Alt, Florian},
title = {{User Experience of Driver State Visualizations: a Look at Demographics and Personalities}},
booktitle = {Proceedings of the 17th IFIP TC.13 International Conference on Human-Computer Interaction},
year = {2019},
series = {INTERACT '19},
address = {Berlin-Heidelberg, Germany},
month = {4},
publisher = {Springer},
note = {braun2019interact1},
abstract = {Driver state detection is an emerging topic for automotive user interfaces. Motivated by the trend of self-tracking, one crucial question within this field is how or whether detected states should be displayed. In this work we investigate the impact of demographics and personality traits on the user experience of driver state visualizations. 328 participants experienced three concepts visualizing their current state in a publicly installed driving simulator. Driver age, experience, and personality traits were shown to have impact on visualization preferences. While a continuous display was generally preferred, older respondents and drivers with little experience favored a system with less visual elements. Extroverted participants were more open towards interventions. Our findings lead us to believe that, while users are generally open to driver state detection, its visualization should be adapted to age, driving experience, and personality. This work is meant to support professionals and researchers designing affective in-car information systems.},
day = {1},
keywords = {Affective Computing, Emotion Detection, Demographics, Personality, Driver State Visualization, Automotive User Interfaces},
language = {English},
location = {Paphos, Cyprus},
owner = {florian},
timestamp = {2019.09.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2019interact1.pdf},
}
M. Hassib, M. Braun, B. Pfleging, and F. Alt, “Detecting and influencing driver emotions using psycho-physiological sensors and ambient light,” in Proceedings of the 17th IFIP TC.13 International Conference on Human-Computer Interaction, Berlin-Heidelberg, Germany, 2019.
[BibTeX] [Abstract] [Download PDF]
Driving is a sensitive task that is strongly affected by the driver’s emotions. Negative emotions, such as anger, can evidently lead to more driving errors. In this work, we introduce a concept of detecting and influencing driver emotions using psycho-physiological sensing for emotion classification and ambient light for feedback. We detect arousal and valence of emotional responses from wearable bio-electric sensors, namely brain-computer interfaces and heart rate sensors. We evaluated our concept in a static driving simulator with a fully equipped car with 12 participants. Before the rides, we elicit negative emotions and evaluate driving performance and physiological data while driving under stressful conditions. We use three ambient lighting conditions (no light, blue, orange). Using a subject-dependent random forests classifier with 40 features collected from physiological data we achieve an average accuracy of 78.9% for classifying valence and 68.7% for arousal. Driving performance was enhanced in conditions where ambient lighting was introduced. Both blue and orange light helped drivers to improve lane keeping. We discuss insights from our study and provide design recommendations for designing emotion sensing and feedback systems in the car.
@InProceedings{braun2019interact2,
author = {Mariam Hassib and Michael Braun and Bastian Pfleging and Florian Alt},
title = {{Detecting and influencing driver emotions using psycho-physiological sensors and ambient light}},
booktitle = {Proceedings of the 17th IFIP TC.13 International Conference on Human-Computer Interaction},
year = {2019},
series = {INTERACT '19},
address = {Berlin-Heidelberg, Germany},
month = {4},
publisher = {Springer},
note = {braun2019interact2},
abstract = {Driving is a sensitive task that is strongly affected by the driver's emotions. Negative emotions, such as anger, can evidently lead to more driving errors. In this work, we introduce a concept of detecting and influencing driver emotions using psycho-physiological sensing for emotion classification and ambient light for feedback. We detect arousal and valence of emotional responses from wearable bio-electric sensors, namely brain-computer interfaces and heart rate sensors. We evaluated our concept in a static driving simulator with a fully equipped car with 12 participants. Before the rides, we elicit negative emotions and evaluate driving performance and physiological data while driving under stressful conditions. We use three ambient lighting conditions (no light, blue, orange). Using a subject-dependent random forests classifier with 40 features collected from physiological data we achieve an average accuracy of 78.9\% for classifying valence and 68.7\% for arousal. Driving performance was enhanced in conditions where ambient lighting was introduced. Both blue and orange light helped drivers to improve lane keeping. We discuss insights from our study and provide design recommendations for designing emotion sensing and feedback systems in the car.},
day = {1},
keywords = {Affective Computing, Automotive UI, EEG, Ambient Light},
language = {English},
location = {Paphos, Cyprus},
owner = {florian},
timestamp = {2019.09.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2019interact2.pdf},
}
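As a rough illustration of the classification step described above (a subject-dependent random forest over 40 physiological features), here is a minimal Python sketch. All data, labels, and feature semantics are synthetic placeholders; the paper's actual feature extraction from EEG and heart-rate signals is not reproduced here.

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(1)

# Synthetic per-window features for ONE participant (subject-dependent model),
# standing in for, e.g., EEG band powers and heart-rate statistics.
n_windows, n_features = 200, 40
X = rng.normal(size=(n_windows, n_features))
valence = (X[:, :5].mean(axis=1) > 0).astype(int)   # toy binary labels
arousal = (X[:, 5:10].mean(axis=1) > 0).astype(int)

for name, y in [("valence", valence), ("arousal", arousal)]:
    clf = RandomForestClassifier(n_estimators=100, random_state=0)
    print(name, "accuracy:", cross_val_score(clf, X, y, cv=5).mean())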
M. Braun, N. Broy, B. Pfleging, and F. Alt, “Visualizing natural language interaction for conversational in-vehicle information systems to minimize driver distraction,” Journal on Multimodal User Interfaces, vol. 13, iss. 2, pp. 71–88, 2019. doi:10.1007/s12193-019-00301-2
[BibTeX] [Abstract] [Download PDF]
In this paper we investigate how natural language interfaces can be integrated with cars in a way such that their influence on driving performance is being minimized. In particular, we focus on how speech-based interaction can be supported through a visualization of the conversation. Our work is motivated by the fact that speech interfaces (like Alexa, Siri, Cortana, etc.) are increasingly finding their way into our everyday life. We expect such interfaces to become commonplace in vehicles in the future. Cars are a challenging environment, since speech interaction here is a secondary task that should not negatively affect the primary task, that is driving. At the outset of our work, we identify the design space for such interfaces. We then compare different visualization concepts in a driving simulator study with 64 participants. Our results yield that (1) text summaries support drivers in recalling information and enhances user experience but can also increase distraction, (2) the use of keywords minimizes cognitive load and influence on driving performance, and (3) the use of icons increases the attractiveness of the interface.
@Article{braun2019JMUI,
author = {Braun, Michael and Broy, Nora and Pfleging, Bastian and Alt, Florian},
title = {{Visualizing natural language interaction for conversational in-vehicle information systems to minimize driver distraction}},
journal = {Journal on Multimodal User Interfaces},
year = {2019},
volume = {13},
number = {2},
pages = {71--88},
month = jun,
issn = {1783-8738},
note = {braun2019JMUI},
abstract = {In this paper we investigate how natural language interfaces can be integrated with cars in a way such that their influence on driving performance is being minimized. In particular, we focus on how speech-based interaction can be supported through a visualization of the conversation. Our work is motivated by the fact that speech interfaces (like Alexa, Siri, Cortana, etc.) are increasingly finding their way into our everyday life. We expect such interfaces to become commonplace in vehicles in the future. Cars are a challenging environment, since speech interaction here is a secondary task that should not negatively affect the primary task, that is driving. At the outset of our work, we identify the design space for such interfaces. We then compare different visualization concepts in a driving simulator study with 64 participants. Our results yield that (1) text summaries support drivers in recalling information and enhances user experience but can also increase distraction, (2) the use of keywords minimizes cognitive load and influence on driving performance, and (3) the use of icons increases the attractiveness of the interface.},
day = {01},
doi = {10.1007/s12193-019-00301-2},
owner = {florian},
timestamp = {2019.04.16},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2019JMUI.pdf},
}
M. Braun, J. Schubert, B. Pfleging, and F. Alt, “Improving Driver Emotions with Affective Strategies,” Multimodal Technologies and Interaction, vol. 3, iss. 1, 2019. doi:10.3390/mti3010021
[BibTeX] [Abstract] [Download PDF]
Drivers in negative emotional states, such as anger or sadness, are prone to perform bad at driving, decreasing overall road safety for all road users. Recent advances in affective computing, however, allow for the detection of such states and give us tools to tackle the connected problems within automotive user interfaces. We see potential in building a system which reacts upon possibly dangerous driver states and influences the driver in order to drive more safely. We compare different interaction approaches for an affective automotive interface, namely Ambient Light, Visual Notification, a Voice Assistant, and an Empathic Assistant. Results of a simulator study with 60 participants (30 each with induced sadness/anger) indicate that an emotional voice assistant with the ability to empathize with the user is the most promising approach as it improves negative states best and is rated most positively. Qualitative data also shows that users prefer an empathic assistant but also resent potential paternalism. This leads us to suggest that digital assistants are a valuable platform to improve driver emotions in automotive environments and thereby enable safer driving.
@Article{braun2019mdpi,
author = {Braun, Michael and Schubert, Jonas and Pfleging, Bastian and Alt, Florian},
title = {{Improving Driver Emotions with Affective Strategies}},
journal = {Multimodal Technologies and Interaction},
year = {2019},
volume = {3},
number = {1},
issn = {2414-4088},
note = {braun2019mdpi},
abstract = {Drivers in negative emotional states, such as anger or sadness, are prone to perform bad at driving, decreasing overall road safety for all road users. Recent advances in affective computing, however, allow for the detection of such states and give us tools to tackle the connected problems within automotive user interfaces. We see potential in building a system which reacts upon possibly dangerous driver states and influences the driver in order to drive more safely. We compare different interaction approaches for an affective automotive interface, namely Ambient Light, Visual Notification, a Voice Assistant, and an Empathic Assistant. Results of a simulator study with 60 participants (30 each with induced sadness/anger) indicate that an emotional voice assistant with the ability to empathize with the user is the most promising approach as it improves negative states best and is rated most positively. Qualitative data also shows that users prefer an empathic assistant but also resent potential paternalism. This leads us to suggest that digital assistants are a valuable platform to improve driver emotions in automotive environments and thereby enable safer driving.},
article-number = {21},
doi = {10.3390/mti3010021},
owner = {florian},
timestamp = {2019.04.16},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2019mdpi.pdf},
}
H. Drewes, K. Pfeuffer, and F. Alt, “Time- and Space-efficient Eye Tracker Calibration,” in Proceedings of the 2019 ACM Symposium on Eye Tracking Research & Applications, New York, NY, USA, 2019. doi:10.1145/3314111.3319818
[BibTeX] [Abstract] [Download PDF]
One of the obstacles to bring eye tracking technology to everyday human computer interactions is the time consuming calibration procedure. In this paper we investigate a novel calibration method based on smooth pursuit eye movement. The method uses linear regression to calculate the calibration mapping. The advantage is that users can perform the calibration quickly in a few seconds and only use a small calibration area to cover a large tracking area. We first describe the theoretical background on establishing a calibration mapping and discuss differences of calibration methods used. We then present a user study comparing the new regression based method with a classical nine-point and with other pursuit based calibrations. The results show the proposed method is fully functional, quick, and enables accurate tracking of a large area. The method has the potential to be integrated into current eye tracking systems to make them more usable in various use cases.
@InProceedings{drewes2019etra,
author = {Drewes, Heiko and Pfeuffer, Ken and Alt, Florian},
title = {{Time- and Space-efficient Eye Tracker Calibration}},
booktitle = {Proceedings of the 2019 ACM Symposium on Eye Tracking Research \& Applications},
year = {2019},
series = {ETRA '19},
address = {New York, NY, USA},
publisher = {ACM},
note = {drewes2019etra},
abstract = {One of the obstacles to bring eye tracking technology to everyday human computer interactions is the time consuming calibration procedure. In this paper we investigate a novel calibration method based on smooth pursuit eye movement. The method uses linear regression to calculate the calibration mapping. The advantage is that users can perform the calibration quickly in a few seconds and only use a small calibration area to cover a large tracking area. We first describe the theoretical background on establishing a calibration mapping and discuss differences of calibration methods used. We then present a user study comparing the new regression based method with a classical nine-point and with other pursuit based calibrations. The results show the proposed method is fully functional, quick, and enables accurate tracking of a large area. The method has the potential to be integrated into current eye tracking systems to make them more usable in various use cases.},
acmid = {3319818},
doi = {10.1145/3314111.3319818},
isbn = {978-1-4503-6709-7},
keywords = {eye-tracking, calibration, eye-tracker, smooth pursuit, eye movement},
location = {Denver, CO, USA},
numpages = {8},
owner = {florian},
timestamp = {2019.06.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/drewes2019etra.pdf},
}
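The core of the calibration method above is a least-squares fit from raw gaze samples (collected while the eye smoothly pursues a moving target) to screen coordinates. Below is a minimal sketch on synthetic data, assuming an affine device-to-screen mapping; it illustrates the regression idea only, not the authors' implementation.

import numpy as np

rng = np.random.default_rng(2)

# Raw eye-tracker samples recorded while the user follows an on-screen target.
raw = rng.uniform(-1, 1, size=(500, 2))
true_A = np.array([[800.0, 20.0], [-15.0, 450.0]])  # unknown transform (ground truth)
true_b = np.array([960.0, 540.0])
screen = raw @ true_A + true_b + rng.normal(scale=2.0, size=raw.shape)  # noisy targets

# Least-squares fit of the affine calibration mapping: screen ≈ [raw, 1] @ M
design = np.hstack([raw, np.ones((len(raw), 1))])
M, *_ = np.linalg.lstsq(design, screen, rcond=None)

# Apply the calibration to a new raw sample.
sample = np.array([0.1, -0.3, 1.0])
print("estimated screen position:", sample @ M)

Because the fitted mapping extrapolates beyond the sampled region, a small calibration area can in principle cover a larger tracking area, which is the efficiency argument the paper makes.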
C. George, P. Janssen, D. Heuss, and F. Alt, “Should I Interrupt or Not? Understanding Interruptions in Head-Mounted Display Settings,” in Proceedings of the 2019 ACM Conference on Designing Interactive Systems, New York, NY, USA, 2019.
[BibTeX] [Abstract] [Download PDF]
Head-mounted displays (HMDs) are being used for VR and AR applications and increasingly permeate our everyday life. At the same time, a detailed understanding of interruptions in settings where people wearing an HMD (HMD user) and people not wearing an HMD (bystander) interact is missing. We investigate (a) whether bystanders are capable of identifying when HMD users switch tasks by observing their gestures, and hence exploit opportune moments for interruptions, and (b) which strategies bystanders employ. In a lab study (N=64) we found that bystanders are able to successfully identify both task switches (83%) and tasks (77%) within only a few seconds of the task switch. Furthermore, we identified interruption strategies of bystanders. From our results we derive implications meant to support designers and practitioners in building HMD applications that are used in a co-located collaborative setting.
@InProceedings{george2019dis,
author = {George, Ceenu and Janssen, Philipp and Heuss, David and Alt, Florian},
title = {{Should I Interrupt or Not? Understanding Interruptions in Head-Mounted Display Settings}},
booktitle = {Proceedings of the 2019 ACM Conference on Designing Interactive Systems},
year = {2019},
series = {DIS '19},
address = {New York, NY, USA},
publisher = {ACM},
note = {george2019dis},
abstract = {Head-mounted displays (HMDs) are being used for VR and AR applications and increasingly permeate our everyday life. At the same time, a detailed understanding of interruptions in settings where people wearing an HMD (HMD user) and people not wearing an HMD (bystander) interact is missing. We investigate (a) whether bystanders are capable of identifying when HMD users switch tasks by observing their gestures, and hence exploit opportune moments for interruptions, and (b) which strategies bystanders employ. In a lab study (N=64) we found that bystanders are able to successfully identify both task switches (83\%) and tasks (77\%) within only a few seconds of the task switch. Furthermore, we identified interruption strategies of bystanders. From our results we derive implications meant to support designers and practitioners in building HMD applications that are used in a co-located collaborative setting.},
keywords = {HMD, Gesture, Interruption, Virtual and Augmented Reality},
location = {San Diego, CA, USA},
owner = {florian},
timestamp = {2019.06.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/george2019dis.pdf},
}
S. Prange, C. Tiefenau, E. von Zezschwitz, and F. Alt, “Towards Understanding User Interaction in Future Smart Homes,” in Proceedings of CHI ’19 Workshop on New Directions for the IoT: Automate, Share, Build, and Care, New York, NY, USA, 2019.
[BibTeX] [Abstract] [Download PDF]
IoT devices are currently finding their way into people’s homes, providing rich functionality by means of various interaction modalities. We see great potential in collecting and analysing data about users’ interaction with their smart home devices to gain insights about their daily life behaviour for self-reflection as well as security purposes. We present a methodology to study interaction with IoT devices in users’ (smart) homes. Logging daily behaviour usually comes with high effort and often interrupts natural interaction. Hence, we suggest an unobtrusive logging approach by means of a smartwatch and NFC technology. Participants scan interaction with devices using self-placed NFC tags. We tested our method with two flat shares in two cities and provide preliminary insights with regards to the strengths and weaknesses of our study approach.
@InProceedings{prange2019iot,
author = {Sarah Prange AND Christian Tiefenau AND Emanuel von Zezschwitz AND Florian Alt},
title = {{Towards Understanding User Interaction in Future Smart Homes}},
booktitle = {Proceedings of CHI '19 Workshop on New Directions for the IoT: Automate, Share, Build, and Care},
year = {2019},
series = {CHI '19 Workshop},
address = {New York, NY, USA},
publisher = {ACM},
note = {prange2019iot},
abstract = {IoT devices are currently finding their way into people's homes, providing rich functionality by means of various interaction modalities. We see great potential in collecting and analysing data about users' interaction with their smart home devices to gain insights about their daily life behaviour for self-reflection as well as security purposes. We present a methodology to study interaction with IoT devices in users' (smart) homes. Logging daily behaviour usually comes with high effort and often interrupts natural interaction. Hence, we suggest an unobtrusive logging approach by means of a smartwatch and NFC technology. Participants scan interaction with devices using self-placed NFC tags. We tested our method with two flat shares in two cities and provide preliminary insights with regards to the strengths and weaknesses of our study approach.},
keywords = {IoT, Internet of Things, Smart Home, Smart Devices, NFC, Android, Field Study, Data Collection, In-the-wild},
location = {Glasgow, UK},
numpages = {5},
owner = {florian},
timestamp = {2019.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/prange2019iot.pdf},
}
S. R. R. Rivu, Y. Abdrabou, T. Mayer, K. Pfeuffer, and F. Alt, “GazeButton: Enhancing Buttons with Eye Gaze Interactions,” in Proceedings of the 2019 ACM Symposium on Eye Tracking Research & Applications, New York, NY, USA, 2019.
[BibTeX] [Abstract] [Download PDF]
The button is an element of a user interface to trigger an action, traditionally using click or touch. We introduce GazeButton, a novel concept extending the default button mode with advanced gaze-based interactions. During normal interaction, users can utilise this button as a universal hub for gaze-based UI shortcuts. The advantages are: 1) easy to integrate in existing UIs, 2) complementary, as users choose either gaze or manual interaction, 3) straightforward, as all features are located in one button, and 4) one button to interact with the whole screen. We explore GazeButtons for a text editing tool on a multitouch tablet. For example, this allows the text cursor position to be set as users look at the position and tap on the GazeButton, avoiding costly physical movement. We present a design space, specific application examples, and point to future button designs that become highly expressive by unifying the user’s visual and manual input.
@InProceedings{rivu2019cogain,
author = {Sheikh Radiah Rahim Rivu AND Yasmeen Abdrabou AND Thomas Mayer AND Ken Pfeuffer AND Florian Alt},
title = {{GazeButton: Enhancing Buttons with Eye Gaze Interactions}},
booktitle = {Proceedings of the 2019 ACM Symposium on Eye Tracking Research \& Applications},
year = {2019},
series = {COGAIN '19},
address = {New York, NY, USA},
publisher = {ACM},
note = {rivu2019cogain},
abstract = {The button is an element of a user interface to trigger an action, traditionally using click or touch. We introduce GazeButton, a novel concept extending the default button mode with advanced gaze-based interactions. During normal interaction, users can utilise this button as a universal hub for gaze-based UI shortcuts. The advantages are: 1) easy to integrate in existing UIs, 2) complementary, as users choose either gaze or manual interaction, 3) straightforward, as all features are located in one button, and 4) one button to interact with the whole screen. We explore GazeButtons for a text editing tool on a multitouch tablet. For example, this allows the text cursor position to be set as users look at the position and tap on the GazeButton, avoiding costly physical movement. We present a design space, specific application examples, and point to future button designs that become highly expressive by unifying the user's visual and manual input.},
keywords = {Interaction Modality, Text Input, Touch and Gaze},
location = {Denver, CO, USA},
numpages = {7},
owner = {florian},
timestamp = {2019.06.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/rivu2019cogain.pdf},
}
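The interaction described above (look at a target, then tap the button to commit) reduces to combining the latest gaze sample with a tap event. A toy sketch follows, with invented placeholder classes rather than any real eye-tracking API.

from dataclasses import dataclass

@dataclass
class GazeSample:
    x: float
    y: float

class Editor:
    def set_cursor(self, x: float, y: float) -> None:
        print(f"cursor moved to ({x:.0f}, {y:.0f})")

class GazeButton:
    def __init__(self, editor: Editor):
        self.editor = editor
        self.latest_gaze = GazeSample(0.0, 0.0)

    def on_gaze(self, sample: GazeSample) -> None:
        self.latest_gaze = sample  # continuously updated by the eye tracker

    def on_tap(self) -> None:
        # The tap commits the current gaze position, so the hand never
        # has to travel to the target location itself.
        self.editor.set_cursor(self.latest_gaze.x, self.latest_gaze.y)

btn = GazeButton(Editor())
btn.on_gaze(GazeSample(412.0, 230.0))
btn.on_tap()  # cursor moved to (412, 230)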
R. Häuslschmid, D. Ren, F. Alt, A. Butz, and T. Höllerer, “Personalizing content presentation on large 3D head-up displays,” PRESENCE: Virtual and Augmented Reality, vol. 27, iss. 1, pp. 80-106, 2019. doi:10.1162/pres_a_00315
[BibTeX] [Abstract] [Download PDF]
Drivers’ urge to access content on smartphones while driving causes a high number of fatal accidents every year. We explore 3D full-windshield size head-up displays as an opportunity to present such content in a safer manner. In particular, we look into how drivers would personalize such displays and whether it can be considered safe. Firstly, by means of an online survey we identify types of content users access on their smartphones while driving and whether users are interested in the same content on a head-up display. Secondly, we let drivers design personalized 3D layouts and assess how personalization impacts on driving safety. Thirdly, we compare personalized layouts to a one-fits-all layout concept in a 3D driving simulator study regarding safety. We found that drivers’ content preferences diverge largely and that most of the personalized layouts do not respect safety sufficiently. The one-fits-all layout led to a better response performance but needs to be modified to consider the drivers’ preferences. We discuss the implications of the presented research on road safety and future 3D information placement on head-up displays.
@Article{haeuslschmid2019mti,
author = {H\"{a}uslschmid, Renate and Ren, Donghao and Alt, Florian and Butz, Andreas and H\"{o}llerer, Tobias},
title = {Personalizing Content Presentation on Large 3D Head-Up Displays},
journal = {PRESENCE: Virtual and Augmented Reality},
year = {2019},
volume = {27},
number = {1},
pages = {80-106},
note = {haeuslschmid2019mti},
abstract = { Drivers' urge to access content on smartphones while driving causes a high number of fatal accidents every year. We explore 3D full-windshield size head-up displays as an opportunity to present such content in a safer manner. In particular, we look into how drivers would personalize such displays and whether it can be considered safe. Firstly, by means of an online survey we identify types of content users access on their smartphones while driving and whether users are interested in the same content on a head-up display. Secondly, we let drivers design personalized 3D layouts and assess how personalization impacts on driving safety. Thirdly, we compare personalized layouts to a one-fits-all layout concept in a 3D driving simulator study regarding safety. We found that drivers' content preferences diverge largely and that most of the personalized layouts do not respect safety sufficiently. The one-fits-all layout led to a better response performance but needs to be modified to consider the drivers' preferences. We discuss the implications of the presented research on road safety and future 3D information placement on head-up displays. },
doi = {10.1162/pres\_a\_00315},
eprint = {https://www.mitpressjournals.org/doi/pdf/10.1162/pres_a_00315},
owner = {florian},
timestamp = {2019.04.16},
url = {http://www.florian-alt.org/unibw/wp-content/publications/haeuslschmid2019mti.pdf},
}
L. Mecke, S. D. Rodriguez, D. Buschek, S. Prange, and F. Alt, “Communicating device confidence level and upcoming re-authentications in continuous authentication systems on mobile devices,” in Proceedings of the Fifteenth Symposium on Usable Privacy and Security, Santa Clara, CA, 2019.
[BibTeX] [Download PDF]
@InProceedings{mecke2019soups1,
author = {Lukas Mecke and Sarah Delgado Rodriguez and Daniel Buschek and Sarah Prange and Florian Alt},
title = {Communicating Device Confidence Level and Upcoming Re-Authentications in Continuous Authentication Systems on Mobile Devices},
booktitle = {Proceedings of the Fifteenth Symposium on Usable Privacy and Security},
year = {2019},
series = {({SOUPS} 2019)},
address = {Santa Clara, CA},
publisher = {{USENIX} Association},
note = {mecke2019soups1},
timestamp = {2019.08.13},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mecke2019soups1.pdf},
}
L. Mecke, D. Buschek, M. Kiermeier, S. Prange, and F. Alt, “Exploring intentional behaviour modifications for password typing on mobile touchscreen devices,” in Proceedings of the Fifteenth Symposium on Usable Privacy and Security, Santa Clara, CA, 2019.
[BibTeX] [Download PDF]
@InProceedings{mecke2019soups2,
author = {Lukas Mecke and Daniel Buschek and Mathias Kiermeier and Sarah Prange and Florian Alt},
title = {Exploring Intentional Behaviour Modifications for Password Typing on Mobile Touchscreen Devices},
booktitle = {Proceedings of the Fifteenth Symposium on Usable Privacy and Security},
year = {2019},
series = {({SOUPS} 2019)},
address = {Santa Clara, CA},
publisher = {{USENIX} Association},
note = {mecke2019soups2},
timestamp = {2019.08.13},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mecke2019soups2.pdf},
}
S. Prange, Y. Abdrabou, L. Mecke, and F. Alt, “Hidden in plain sight: using lockscreen content for authentication on mobile devices,” in Proceedings of the Fifteenth Symposium on Usable Privacy and Security, Santa Clara, CA, 2019.
[BibTeX] [Download PDF]
@InProceedings{prange2019soupsadj,
author = {Sarah Prange AND Yasmeen Abdrabou AND Lukas Mecke AND Florian Alt},
title = {Hidden in Plain Sight: Using Lockscreen Content for Authentication on Mobile Devices},
booktitle = {Proceedings of the Fifteenth Symposium on Usable Privacy and Security},
year = {2019},
series = {SOUPS 2019},
address = {Santa Clara, CA},
publisher = {{USENIX} Association},
note = {prange2019soupsadj},
timestamp = {2019.08.12},
url = {http://www.florian-alt.org/unibw/wp-content/publications/prange2019soupsadj.pdf},
}
M. Kattenbeck, M. A. Kilian, M. Ferstl, B. Ludwig, and F. Alt, “Towards task-sensitive assistance in public spaces,” Aslib Journal of Information Management, 2019.
[BibTeX] [Abstract] [Download PDF]
Purpose Performing tasks in public spaces can be demanding due to task complexity. Systems that can keep track of the current task state may help their users to successfully fulfill a task. These systems, however, require major implementation effort. The purpose of this paper is to investigate if and how a mobile information assistant which has only basic task-tracking capabilities can support users by employing a least effort approach. This means, we are interested in whether such a system is able to have an impact on the way a workflow in public space is perceived. Design/methodology/approach The authors implement and test AIRBOT, a mobile chatbot application that can assist air passengers in successfully boarding a plane. The authors apply a three-tier approach and, first, conduct expert and passenger interviews to understand the workflow and the information needs occurring therein; second, the authors implement a mobile chatbot application providing minimum task-tracking capabilities to support travelers by providing boarding-relevant information in a proactive manner. Finally, the authors evaluate this application by means of an in situ study (n = 101 passengers) at a major European airport. Findings The authors provide evidence that basic task-tracking capabilities are sufficient to affect the users' task perception. AIRBOT is able to decrease the perceived workload airport services impose on users. It has a negative impact on satisfaction with non-personalized information offered by the airport, though. Originality/value The study shows that the number of features is not the most important means to successfully provide assistance in public space workflows. The study can, moreover, serve as a blueprint to design task-based assistants for other contexts.
@Article{kattenbeck2019ajim,
author = {Markus Kattenbeck and Melanie A Kilian and Matthias Ferstl and Bernd Ludwig and Florian Alt},
title = {Towards task-sensitive assistance in public spaces},
journal = {Aslib Journal of Information Management},
year = {2019},
note = {kattenbeck2019ajim},
abstract = {Purpose
Performing tasks in public spaces can be demanding due to task complexity. Systems that can keep track of the current task state may help their users to successfully fulfill a task. These systems, however, require major implementation effort. The purpose of this paper is to investigate if and how a mobile information assistant which has only basic task-tracking capabilities can support users by employing a least effort approach. This means, we are interested in whether such a system is able to have an impact on the way a workflow in public space is perceived.
Design/methodology/approach
The authors implement and test AIRBOT, a mobile chatbot application that can assist air passengers in successfully boarding a plane. The authors apply a three-tier approach and, first, conduct expert and passenger interviews to understand the workflow and the information needs occurring therein; second, the authors implement a mobile chatbot application providing minimum task-tracking capabilities to support travelers by providing boarding-relevant information in a proactive manner. Finally, the authors evaluate this application by means of an in situ study (n = 101 passengers) at a major European airport.
Findings
The authors provide evidence that basic task-tracking capabilities are sufficient to affect the users' task perception. AIRBOT is able to decrease the perceived workload airport services impose on users. It has a negative impact on satisfaction with non-personalized information offered by the airport, though.
Originality/value
The study shows that the number of features is not the most important means to successfully provide assistance in public space workflows. The study can, moreover, serve as a blueprint to design task-based assistants for other contexts.},
keywords = {Human-computer interaction, Assistance system, Cooperative problem solving, In situ study, Mobile information behaviour, Mobile information needs},
publisher = {Emerald Publishing Limited},
url = {http://www.florian-alt.org/unibw/wp-content/publications/kattenbeck2019ajim},
}

2018

D. Buschek, B. Bisinger, and F. Alt, “ResearchIME: A Mobile Keyboard Application for Studying Free Typing Behaviour in the Wild,” in Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems, New York, NY, USA, 2018, p. 255:1–255:14. doi:10.1145/3173574.3173829
[BibTeX] [Abstract] [Download PDF]
We present a data logging concept, tool, and analyses to facilitate studies of everyday mobile touch keyboard use and free typing behaviour: 1) We propose a filtering concept to log typing without recording readable text and assess reactions to filters with a survey (N=349). 2) We release an Android keyboard app and backend that implement this concept. 3) Based on a three-week field study (N=30), we present the first analyses of keyboard use and typing biometrics on such free text typing data in the wild, including speed, postures, apps, autocorrection, and word suggestions. We conclude that research on mobile keyboards benefits from observing free typing beyond the lab and discuss ideas for further studies.
@InProceedings{buschek2018chi2,
author = {Buschek, Daniel and Bisinger, Benjamin and Alt, Florian},
title = {{ResearchIME: A Mobile Keyboard Application for Studying Free Typing Behaviour in the Wild}},
booktitle = {{Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems}},
year = {2018},
series = {CHI '18},
pages = {255:1--255:14},
address = {New York, NY, USA},
publisher = {ACM},
note = {buschek2018chi2},
abstract = {We present a data logging concept, tool, and analyses to facilitate studies of everyday mobile touch keyboard use and free typing behaviour: 1) We propose a filtering concept to log typing without recording readable text and assess reactions to filters with a survey (N=349). 2) We release an Android keyboard app and backend that implement this concept. 3) Based on a three-week field study (N=30), we present the first analyses of keyboard use and typing biometrics on such free text typing data in the wild, including speed, postures, apps, autocorrection, and word suggestions. We conclude that research on mobile keyboards benefits from observing free typing beyond the lab and discuss ideas for further studies.},
acmid = {3173829},
articleno = {255},
comment = {buschek2018chi2},
doi = {10.1145/3173574.3173829},
isbn = {978-1-4503-5620-6},
keywords = {biometrics, data logging, touch keyboard, typing behaviour},
location = {Montreal QC, Canada},
numpages = {14},
timestamp = {2018.05.01},
url = {http://florian-alt.org/unibw/wp-content/publications/buschek2018chi2.pdf},
}
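The filtering concept above (logging typing behaviour without recording readable text) can be illustrated with a simple character-category filter. This is an invented sketch of the general idea, not the ResearchIME app's actual record format.

import string
import time

def anonymize_char(ch: str) -> str:
    """Reduce a typed character to a coarse, non-reconstructable category."""
    if ch in string.ascii_letters:
        return "LETTER"
    if ch in string.digits:
        return "DIGIT"
    if ch.isspace():
        return "SPACE"
    return "SYMBOL"

def log_keystroke(ch: str, x: float, y: float) -> dict:
    # Keep the biometric signal (timing, touch position) but not the content.
    return {"t": time.time(), "x": x, "y": y, "key": anonymize_char(ch)}

print(log_keystroke("a", 120.5, 884.0))  # e.g. {'t': ..., 'x': 120.5, 'y': 884.0, 'key': 'LETTER'}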
M. Hassib, S. Schneegass, N. Henze, A. Schmidt, and F. Alt, “A design space for audience sensing and feedback systems,” in Extended Abstracts of the 2018 CHI Conference on Human Factors in Computing Systems, New York, NY, USA, 2018, p. LBW085:1–LBW085:6. doi:10.1145/3170427.3188569
[BibTeX] [Abstract] [Download PDF]
Audience feedback is a valuable asset in many domains such as arts, education, and marketing. Artists can receive feedback on the experiences created through their performances. Similarly, teachers can receive feedback from students on the understandability of their course content. There are various methods to collect explicit feedback (e.g., questionnaires) – yet they usually impose a burden to the audience. Advances in physiological sensing opens up opportunities for collecting feedback implicitly. This creates unexplored dimensions in the design space of audience sensing. In this work, we chart a comprehensive design space for audience sensing based on a literature and market review which aims to support the designers’ process for creating novel feedback systems.
@InProceedings{hassib2018chiea,
author = {Hassib, Mariam and Schneegass, Stefan and Henze, Niels and Schmidt, Albrecht and Alt, Florian},
title = {A Design Space for Audience Sensing and Feedback Systems},
booktitle = {Extended Abstracts of the 2018 CHI Conference on Human Factors in Computing Systems},
year = {2018},
series = {CHI EA '18},
pages = {LBW085:1--LBW085:6},
address = {New York, NY, USA},
publisher = {ACM},
note = {hassib2018chiea},
abstract = {Audience feedback is a valuable asset in many domains such as arts, education, and marketing. Artists can receive feedback on the experiences created through their performances. Similarly, teachers can receive feedback from students on the understandability of their course content. There are various methods to collect explicit feedback (e.g., questionnaires) - yet they usually impose a burden to the audience. Advances in physiological sensing opens up opportunities for collecting feedback implicitly. This creates unexplored dimensions in the design space of audience sensing. In this work, we chart a comprehensive design space for audience sensing based on a literature and market review which aims to support the designers' process for creating novel feedback systems.},
acmid = {3188569},
articleno = {LBW085},
doi = {10.1145/3170427.3188569},
isbn = {978-1-4503-5621-3},
keywords = {affective computing, audience sensing},
location = {Montreal QC, Canada},
numpages = {6},
timestamp = {2018.04.30},
url = {http://www.florian-alt.org/unibw/wp-content/publications/hassib2018chiea.pdf},
}
T. Kosch, M. Hassib, P. W. Woźniak, D. Buschek, and F. Alt, “Your eyes tell: leveraging smooth pursuit for assessing cognitive workload,” in Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems, New York, NY, USA, 2018, p. 436:1–436:13. doi:10.1145/3173574.3174010
[BibTeX] [Abstract] [Download PDF]
A common objective for context-aware computing systems is to predict how user interfaces impact user performance regarding their cognitive capabilities. Existing approaches such as questionnaires or pupil dilation measurements either only allow for subjective assessments or are susceptible to environmental influences and user physiology. We address these challenges by exploiting the fact that cognitive workload influences smooth pursuit eye movements. We compared three trajectories and two speeds under different levels of cognitive workload within a user study (N=20). We found higher deviations of gaze points during smooth pursuit eye movements for specific trajectory types at higher cognitive workload levels. Using an SVM classifier, we predict cognitive workload through smooth pursuit with an accuracy of 99.5% for distinguishing between low and high workload as well as an accuracy of 88.1% for estimating workload between three levels of difficulty. We discuss implications and present use cases of how cognition-aware systems benefit from inferring cognitive workload in real-time by smooth pursuit eye movements.
@InProceedings{kosch2018chi,
author = {Kosch, Thomas and Hassib, Mariam and Wo\'{z}niak, Pawe\l W. and Buschek, Daniel and Alt, Florian},
title = {Your Eyes Tell: Leveraging Smooth Pursuit for Assessing Cognitive Workload},
booktitle = {Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems},
year = {2018},
series = {CHI '18},
pages = {436:1--436:13},
address = {New York, NY, USA},
publisher = {ACM},
note = {kosch2018chi},
abstract = {A common objective for context-aware computing systems is to predict how user interfaces impact user performance regarding their cognitive capabilities. Existing approaches such as questionnaires or pupil dilation measurements either only allow for subjective assessments or are susceptible to environmental influences and user physiology. We address these challenges by exploiting the fact that cognitive workload influences smooth pursuit eye movements. We compared three trajectories and two speeds under different levels of cognitive workload within a user study (N=20). We found higher deviations of gaze points during smooth pursuit eye movements for specific trajectory types at higher cognitive workload levels. Using an SVM classifier, we predict cognitive workload through smooth pursuit with an accuracy of 99.5% for distinguishing between low and high workload as well as an accuracy of 88.1% for estimating workload between three levels of difficulty. We discuss implications and present use cases of how cognition-aware systems benefit from inferring cognitive workload in real-time by smooth pursuit eye movements.},
acmid = {3174010},
articleno = {436},
doi = {10.1145/3173574.3174010},
isbn = {978-1-4503-5620-6},
keywords = {cognition-aware user interfaces, cognitive workload, eye tracking, mental workload, smooth pursuit, workload-aware computing},
location = {Montreal QC, Canada},
numpages = {13},
timestamp = {2018.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/kosch2018chi.pdf},
}
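The prediction step described above can be approximated with a standard SVM over per-trial deviation features. Here is a hedged sketch on synthetic data, where larger gaze-to-target deviations stand in for higher workload; the paper's actual features and parameters are not reproduced.

import numpy as np
from sklearn.svm import SVC
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(3)

# One feature vector per trial, e.g. mean/std of the distance between gaze
# samples and the pursuit target's trajectory.
low  = rng.normal(loc=1.0, scale=0.3, size=(100, 4))  # small deviations: low workload
high = rng.normal(loc=1.6, scale=0.4, size=(100, 4))  # larger deviations: high workload
X = np.vstack([low, high])
y = np.array([0] * 100 + [1] * 100)

clf = make_pipeline(StandardScaler(), SVC(kernel="rbf"))
print("low/high workload accuracy:", cross_val_score(clf, X, y, cv=5).mean())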
L. Mecke, S. Prange, D. Buschek, and F. Alt, “A design space for security indicators for behavioural biometrics on mobile touchscreen devices,” in Extended Abstracts of the 2018 CHI Conference on Human Factors in Computing Systems, New York, NY, USA, 2018, p. LBW003:1–LBW003:6. doi:10.1145/3170427.3188633
[BibTeX] [Abstract] [Download PDF]
We propose a design space for security indicators for behavioural biometrics on mobile touchscreen devices. Design dimensions are derived from a focus group with experts and a literature review. The space supports the design of indicators which aim to facilitate users’ decision making, awareness and understanding, as well as increase transparency of behavioural biometrics systems. We conclude with a set of example designs and discuss further extensions, future research questions and study ideas.
@InProceedings{mecke2018chiea,
author = {Mecke, Lukas and Prange, Sarah and Buschek, Daniel and Alt, Florian},
title = {A Design Space for Security Indicators for Behavioural Biometrics on Mobile Touchscreen Devices},
booktitle = {Extended Abstracts of the 2018 CHI Conference on Human Factors in Computing Systems},
year = {2018},
series = {CHI EA '18},
pages = {LBW003:1--LBW003:6},
address = {New York, NY, USA},
publisher = {ACM},
note = {mecke2018chiea},
abstract = {We propose a design space for security indicators for behavioural biometrics on mobile touchscreen devices. Design dimensions are derived from a focus group with experts and a literature review. The space supports the design of indicators which aim to facilitate users' decision making, awareness and understanding, as well as increase transparency of behavioural biometrics systems. We conclude with a set of example designs and discuss further extensions, future research questions and study ideas.},
acmid = {3188633},
articleno = {LBW003},
doi = {10.1145/3170427.3188633},
isbn = {978-1-4503-5621-3},
keywords = {behavioural biometrics, design space, focus group, mobile touchscreen devices, security indicator},
location = {Montreal QC, Canada},
numpages = {6},
timestamp = {2018.04.30},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mecke2018chiea.pdf},
}
M. Khamis, C. Becker, A. Bulling, and F. Alt, “Which One is Me?: Identifying Oneself on Public Displays,” in Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems, New York, NY, USA, 2018, pp. 287:1–287:12. doi:10.1145/3173574.3173861
[BibTeX] [Abstract] [Download PDF]
While user representations are extensively used on public displays, it remains unclear how well users can recognize their own representation among those of surrounding users. We study the most widely used representations: abstract objects, skeletons, silhouettes and mirrors. In a prestudy (N=12), we identify five strategies that users follow to recognize themselves on public displays. In a second study (N=19), we quantify the users’ recognition time and accuracy with respect to each representation type. Our findings suggest that there is a significant effect of (1) the representation type, (2) the strategies performed by users, and (3) the combination of both on recognition time and accuracy. We discuss the suitability of each representation for different settings and provide specific recommendations as to how user representations should be applied in multi-user scenarios. These recommendations guide practitioners and researchers in selecting the representation that best fits the deployment’s requirements and the user strategies that are feasible in that environment.
@InProceedings{khamis2018chi1,
author = {Khamis, Mohamed and Becker, Christian and Bulling, Andreas and Alt, Florian},
title = {Which One is Me?: Identifying Oneself on Public Displays},
booktitle = {Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems},
year = {2018},
series = {CHI '18},
pages = {287:1--287:12},
address = {New York, NY, USA},
publisher = {ACM},
note = {khamis2018chi1},
abstract = {While user representations are extensively used on public displays, it remains unclear how well users can recognize their own representation among those of surrounding users. We study the most widely used representations: abstract objects, skeletons, silhouettes and mirrors. In a prestudy (N=12), we identify five strategies that users follow to recognize themselves on public displays. In a second study (N=19), we quantify the users' recognition time and accuracy with respect to each representation type. Our findings suggest that there is a significant effect of (1) the representation type, (2) the strategies performed by users, and (3) the combination of both on recognition time and accuracy. We discuss the suitability of each representation for different settings and provide specific recommendations as to how user representations should be applied in multi-user scenarios. These recommendations guide practitioners and researchers in selecting the representation that best fits the deployment's requirements and the user strategies that are feasible in that environment.},
acmid = {3173861},
articleno = {287},
doi = {10.1145/3173574.3173861},
isbn = {978-1-4503-5620-6},
keywords = {multiple users, public displays, user representations},
location = {Montreal QC, Canada},
numpages = {12},
timestamp = {2018.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2018chi1.pdf},
}
M. Khamis, A. Baier, N. Henze, F. Alt, and A. Bulling, “Understanding Face and Eye Visibility in Front-Facing Cameras of Smartphones Used in the Wild,” in Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems, New York, NY, USA, 2018, pp. 280:1–280:12. doi:10.1145/3173574.3173854
[BibTeX] [Abstract] [Download PDF]
Commodity mobile devices are now equipped with high-resolution front-facing cameras, allowing applications in biometrics (e.g., FaceID in the iPhone X), facial expression analysis, or gaze interaction. However, it is unknown how often users hold devices in a way that allows capturing their face or eyes, and how this impacts detection accuracy. We collected 25,726 in-the-wild photos taken with the front-facing camera of smartphones, as well as associated application usage logs. We found that the full face is visible about 29% of the time, and that in most cases the face is only partially visible. Furthermore, we identified an influence of users’ current activity; for example, when watching videos, the eyes but not the entire face are visible 75% of the time in our dataset. We found that a state-of-the-art face detection algorithm performs poorly against photos taken from front-facing cameras. We discuss how these findings impact mobile applications that leverage face and eye detection, and derive practical implications to address the state of the art’s limitations.
@InProceedings{khamis2018chi2,
author = {Khamis, Mohamed and Baier, Anita and Henze, Niels and Alt, Florian and Bulling, Andreas},
title = {Understanding Face and Eye Visibility in Front-Facing Cameras of Smartphones Used in the Wild},
booktitle = {Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems},
year = {2018},
series = {CHI '18},
pages = {280:1--280:12},
address = {New York, NY, USA},
publisher = {ACM},
note = {khamis2018chi2},
abstract = {Commodity mobile devices are now equipped with high-resolution front-facing cameras, allowing applications in biometrics (e.g., FaceID in the iPhone X), facial expression analysis, or gaze interaction. However, it is unknown how often users hold devices in a way that allows capturing their face or eyes, and how this impacts detection accuracy. We collected 25,726 in-the-wild photos taken with the front-facing camera of smartphones, as well as associated application usage logs. We found that the full face is visible about 29% of the time, and that in most cases the face is only partially visible. Furthermore, we identified an influence of users' current activity; for example, when watching videos, the eyes but not the entire face are visible 75% of the time in our dataset. We found that a state-of-the-art face detection algorithm performs poorly against photos taken from front-facing cameras. We discuss how these findings impact mobile applications that leverage face and eye detection, and derive practical implications to address the state of the art's limitations.},
acmid = {3173854},
articleno = {280},
doi = {10.1145/3173574.3173854},
isbn = {978-1-4503-5620-6},
keywords = {eye tracking, face detection, front-facing camera, gaze estimation, in the wild study, mobile device},
location = {Montreal QC, Canada},
numpages = {12},
timestamp = {2018.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2018chi2.pdf},
}
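As a companion to the abstract above, here is a sketch of the kind of analysis pipeline it implies: running an off-the-shelf face detector over a folder of front-facing-camera photos and logging how often a face is found. The detector choice (OpenCV's bundled Haar cascade) and the directory layout are assumptions for illustration, not the authors' actual setup.
import glob
import cv2

# OpenCV ships a pre-trained frontal-face Haar cascade with the package.
cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

photos = glob.glob("photos/*.jpg")  # hypothetical dataset location
visible = 0
for path in photos:
    gray = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
    faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    if len(faces) > 0:
        visible += 1

if photos:
    print(f"Face detected in {visible / len(photos):.0%} of photos")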
V. Mäkelä, M. Khamis, L. Mecke, J. James, M. Turunen, and F. Alt, “Pocket Transfers: Interaction Techniques for Transferring Content from Situated Displays to Mobile Devices,” in Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems, New York, NY, USA, 2018, pp. 135:1–135:13. doi:10.1145/3173574.3173709
[BibTeX] [Abstract] [Download PDF]
We present Pocket Transfers: interaction techniques that allow users to transfer content from situated displays to a personal mobile device while keeping the device in a pocket or bag. Existing content transfer solutions require direct manipulation of the mobile device, making interaction slower and less flexible. Our introduced techniques employ touch, mid-air gestures, gaze, and a multimodal combination of gaze and mid-air gestures. We evaluated the techniques in a novel user study (N=20), where we considered dynamic scenarios where the user approaches the display, completes the task, and leaves. We show that all pocket transfer techniques are fast and seen as highly convenient. Mid-air gestures are the most efficient touchless method for transferring a single item, while the multimodal method is the fastest touchless method when multiple items are transferred. We provide guidelines to help researchers and practitioners choose the most suitable content transfer techniques for their systems.
@InProceedings{makela2018chi,
author = {M\"{a}kel\"{a}, Ville and Khamis, Mohamed and Mecke, Lukas and James, Jobin and Turunen, Markku and Alt, Florian},
title = {Pocket Transfers: Interaction Techniques for Transferring Content from Situated Displays to Mobile Devices},
booktitle = {Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems},
year = {2018},
series = {CHI '18},
pages = {135:1--135:13},
address = {New York, NY, USA},
publisher = {ACM},
note = {makela2018chi},
abstract = {We present Pocket Transfers: interaction techniques that allow users to transfer content from situated displays to a personal mobile device while keeping the device in a pocket or bag. Existing content transfer solutions require direct manipulation of the mobile device, making interaction slower and less flexible. Our introduced techniques employ touch, mid-air gestures, gaze, and a multimodal combination of gaze and mid-air gestures. We evaluated the techniques in a novel user study (N=20), where we considered dynamic scenarios where the user approaches the display, completes the task, and leaves. We show that all pocket transfer techniques are fast and seen as highly convenient. Mid-air gestures are the most efficient touchless method for transferring a single item, while the multimodal method is the fastest touchless method when multiple items are transferred. We provide guidelines to help researchers and practitioners choose the most suitable content transfer techniques for their systems.},
acmid = {3173709},
articleno = {135},
doi = {10.1145/3173574.3173709},
isbn = {978-1-4503-5620-6},
keywords = {content transfer, cross-device interaction, gaze, mid-air gestures, multimodal, public displays, ubiquitous computing},
location = {Montreal QC, Canada},
numpages = {13},
timestamp = {2018.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/makela2018chi.pdf},
}
A. Colley, J. Häkkilä, M. Forsman, B. Pfleging, and F. Alt, “Car Exterior Surface Displays: Exploration in a Real-World Context,” in Proceedings of the 7th ACM International Symposium on Pervasive Displays, New York, NY, USA, 2018, pp. 7:1–7:8. doi:10.1145/3205873.3205880
[BibTeX] [Abstract] [Download PDF]
Current changes in the automotive industry towards autonomous vehicles will spur wide-ranging changes in the roles of cars in urban environments. When combined with advances in display technology, this creates potential for the outer surfaces of cars to act as public displays. We present a real-world, in-context study, where participants ideated on a variety of different types of informative content, displayed on or around vehicles. Our study approach utilized handheld projection to create visualization experiences suggestive of the capabilities of future display technologies. The salient findings show that ideas related to the car and the driving function, such as parking, warning pedestrians and changing the vehicle’s aesthetic appearance, were appreciated. In contrast, ideas where the vehicle formed part of a smart urban infrastructure, such as guiding pedestrians or acting as a public display, caused diverse opinions. In particular, concepts where personalized content was shown were disliked for reasons related to privacy and feeling like ‘big brother’ is watching.
@InProceedings{colley2018perdis,
author = {Colley, Ashley and H\"{a}kkil\"{a}, Jonna and Forsman, Meri-Tuulia and Pfleging, Bastian and Alt, Florian},
title = {Car Exterior Surface Displays: Exploration in a Real-World Context},
booktitle = {Proceedings of the 7th ACM International Symposium on Pervasive Displays},
year = {2018},
series = {PerDis '18},
pages = {7:1--7:8},
address = {New York, NY, USA},
publisher = {ACM},
note = {colley2018perdis},
abstract = {Current changes in the automotive industry towards autonomous vehicles will spur wide-ranging changes in the roles of cars in urban environments. When combined with advances in display technology, this creates potential for the outer surfaces of cars to act as public displays. We present a real-world, in-context study, where participants ideated on a variety of different types of informative content, displayed on or around vehicles. Our study approach utilized handheld projection to create visualization experiences suggestive of the capabilities of future display technologies. The salient findings show that ideas related to the car and the driving function, such as parking, warning pedestrians and changing the vehicle's aesthetic appearance, were appreciated. In contrast, ideas where the vehicle formed part of a smart urban infrastructure, such as guiding pedestrians or acting as a public display, caused diverse opinions. In particular, concepts where personalized content was shown were disliked for reasons related to privacy and feeling like 'big brother' is watching.},
acmid = {3205880},
articleno = {7},
doi = {10.1145/3205873.3205880},
isbn = {978-1-4503-5765-4},
keywords = {Automotive UI, interactive surfaces, pedestrian guidance, pervasive navigation, projected AR, public displays, spatial augmented reality},
location = {Munich, Germany},
numpages = {8},
timestamp = {2018.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/colley2018perdis.pdf},
}
M. Khamis, C. Oechsner, F. Alt, and A. Bulling, “VRpursuits: Interaction in Virtual Reality Using Smooth Pursuit Eye Movements,” in Proceedings of the 2018 International Conference on Advanced Visual Interfaces, New York, NY, USA, 2018, pp. 18:1–18:8. doi:10.1145/3206505.3206522
[BibTeX] [Abstract] [Download PDF]
Gaze-based interaction using smooth pursuit eye movements (Pursuits) is attractive given that it is intuitive and overcomes the Midas touch problem. At the same time, eye tracking is becoming increasingly popular for VR applications. While Pursuits was shown to be effective in several interaction contexts, it was never explored in depth for VR before. In a user study (N=26), we investigated how parameters that are specific to VR settings influence the performance of Pursuits. For example, we found that Pursuits is robust against different sizes of virtual 3D targets. However, performance improves when the trajectory size (e.g., radius) is larger, particularly if the user is walking while interacting. While walking, selecting moving targets via Pursuits is generally feasible albeit less accurate than when stationary. Finally, we discuss the implications of these findings and the potential of smooth pursuits for interaction in VR by demonstrating two sample use cases: 1) gaze-based authentication in VR, and 2) a space meteors shooting game.
@InProceedings{khamis2018avi,
author = {Khamis, Mohamed and Oechsner, Carl and Alt, Florian and Bulling, Andreas},
title = {VRpursuits: Interaction in Virtual Reality Using Smooth Pursuit Eye Movements},
booktitle = {Proceedings of the 2018 International Conference on Advanced Visual Interfaces},
year = {2018},
series = {AVI '18},
pages = {18:1--18:8},
address = {New York, NY, USA},
publisher = {ACM},
note = {khamis2018avi},
abstract = {Gaze-based interaction using smooth pursuit eye movements (Pursuits) is attractive given that it is intuitive and overcomes the Midas touch problem. At the same time, eye tracking is becoming increasingly popular for VR applications. While Pursuits was shown to be effective in several interaction contexts, it was never explored in depth for VR before. In a user study (N=26), we investigated how parameters that are specific to VR settings influence the performance of Pursuits. For example, we found that Pursuits is robust against different sizes of virtual 3D targets. However, performance improves when the trajectory size (e.g., radius) is larger, particularly if the user is walking while interacting. While walking, selecting moving targets via Pursuits is generally feasible albeit less accurate than when stationary. Finally, we discuss the implications of these findings and the potential of smooth pursuits for interaction in VR by demonstrating two sample use cases: 1) gaze-based authentication in VR, and 2) a space meteors shooting game.},
acmid = {3206522},
articleno = {18},
doi = {10.1145/3206505.3206522},
isbn = {978-1-4503-5616-9},
keywords = {eye tracking, gaze interaction, pursuits, virtual reality},
location = {Castiglione della Pescaia, Grosseto, Italy},
numpages = {8},
timestamp = {2018.05.31},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2018avi.pdf},
}
F. Alt, S. Geiger, and W. Höhl, “ShapelineGuide: Teaching Mid-Air Gestures for Large Interactive Displays,” in Proceedings of the 7th ACM International Symposium on Pervasive Displays, New York, NY, USA, 2018, pp. 3:1–3:8. doi:10.1145/3205873.3205887
[BibTeX] [Abstract] [Download PDF]
We present ShapelineGuide, a dynamic visual guide that supports users of large interactive displays while performing mid-air gestures. Today, we find many examples of large displays supporting interaction through gestures performed in mid-air. Yet, approaches that support users in learning and executing these gestures are still scarce. Prior approaches require complex setups, are targeted towards the use of 2D gestures, or focus on the initial gestures only. Our work extends the state of the art by presenting a feedforward system that provides users with constant updates on their gestures. We report on the design and implementation of the approach and present findings from an evaluation of the system in a lab study (N=44), focusing on learning performance, accuracy, and errors. We found that ShapelineGuide helps users learn the gestures and decreases execution times and cognitive load.
@InProceedings{alt2018perdis,
author = {Alt, Florian and Geiger, Sabrina and H\"{o}hl, Wolfgang},
title = {ShapelineGuide: Teaching Mid-Air Gestures for Large Interactive Displays},
booktitle = {Proceedings of the 7th ACM International Symposium on Pervasive Displays},
year = {2018},
series = {PerDis '18},
pages = {3:1--3:8},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2018perdis},
abstract = {We present ShapelineGuide, a dynamic visual guide that supports users of large interactive displays while performing mid-air gestures. Today, we find many examples of large displays supporting interaction through gestures performed in mid-air. Yet, approaches that support users in learning and executing these gestures are still scarce. Prior approaches require complex setups, are targeted towards the use of 2D gestures, or focus on the initial gestures only. Our work extends the state of the art by presenting a feedforward system that provides users with constant updates on their gestures. We report on the design and implementation of the approach and present findings from an evaluation of the system in a lab study (N=44), focusing on learning performance, accuracy, and errors. We found that ShapelineGuide helps users learn the gestures and decreases execution times and cognitive load.},
acmid = {3205887},
articleno = {3},
doi = {10.1145/3205873.3205887},
isbn = {978-1-4503-5765-4},
keywords = {Displays, Dynamic Guides, Feedback, Feedforward, Gestures},
location = {Munich, Germany},
numpages = {8},
timestamp = {2018.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2018perdis.pdf},
}
T. Mattusch, M. Mirzamohammad, M. Khamis, A. Bulling, and F. Alt, “Hidden Pursuits: Evaluating Gaze-Selection via Pursuits When the Stimuli’s Trajectory is Partially Hidden,” in Proceedings of the 2018 ACM Symposium on Eye Tracking Research & Applications, New York, NY, USA, 2018, pp. 27:1–27:5. doi:10.1145/3204493.3204569
[BibTeX] [Abstract] [Download PDF]
The idea behind gaze interaction using Pursuits is to leverage the human’s smooth pursuit eye movements performed when following moving targets. However, humans can also anticipate where a moving target would reappear if it temporarily hides from their view. In this work, we investigate how well users can select targets using Pursuits in cases where the target’s trajectory is partially invisible (HiddenPursuits): e.g., can users select a moving target that temporarily hides behind another object? Although HiddenPursuits was not studied in the context of interaction before, understanding how well users can perform HiddenPursuits presents numerous opportunities, particularly for small interfaces where a target’s trajectory can cover an area outside of the screen. We found that users can still select targets quickly via Pursuits even if their trajectory is up to 50% hidden, albeit at the expense of longer selection times when the hidden portion is larger. We discuss how gaze-based interfaces can leverage HiddenPursuits for an improved user experience.
@InProceedings{mattusch2018etra,
author = {Mattusch, Thomas and Mirzamohammad, Mahsa and Khamis, Mohamed and Bulling, Andreas and Alt, Florian},
title = {Hidden Pursuits: Evaluating Gaze-selection via Pursuits when the Stimuli's Trajectory is Partially Hidden},
booktitle = {Proceedings of the 2018 ACM Symposium on Eye Tracking Research \& Applications},
year = {2018},
series = {ETRA '18},
pages = {27:1--27:5},
address = {New York, NY, USA},
publisher = {ACM},
note = {mattusch2018etra},
abstract = {The idea behind gaze interaction using Pursuits is to leverage the human's smooth pursuit eye movements performed when following moving targets. However, humans can also anticipate where a moving target would reappear if it temporarily hides from their view. In this work, we investigate how well users can select targets using Pursuits in cases where the target's trajectory is partially invisible (HiddenPursuits): e.g., can users select a moving target that temporarily hides behind another object? Although HiddenPursuits was not studied in the context of interaction before, understanding how well users can perform HiddenPursuits presents numerous opportunities, particularly for small interfaces where a target's trajectory can cover an area outside of the screen. We found that users can still select targets quickly via Pursuits even if their trajectory is up to 50% hidden, albeit at the expense of longer selection times when the hidden portion is larger. We discuss how gaze-based interfaces can leverage HiddenPursuits for an improved user experience.},
acmid = {3204569},
articleno = {27},
doi = {10.1145/3204493.3204569},
isbn = {978-1-4503-5706-7},
keywords = {displays, hidden trajectory, motion correlation, smooth pursuit},
location = {Warsaw, Poland},
numpages = {5},
timestamp = {2018.06.06},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mattusch2018etra.pdf},
}
M. Khamis, F. Alt, and A. Bulling, “The Past, Present, and Future of Gaze-enabled Handheld Mobile Devices: Survey and Lessons Learned,” in Proceedings of the 20th International Conference on Human-Computer Interaction with Mobile Devices and Services, New York, NY, USA, 2018, pp. 38:1–38:17. doi:10.1145/3229434.3229452
[BibTeX] [Abstract] [Download PDF]
While first-generation mobile gaze interfaces required special-purpose hardware, recent advances in computational gaze estimation and the availability of sensor-rich and powerful devices are finally fulfilling the promise of pervasive eye tracking and eye-based interaction on off-the-shelf mobile devices. This work provides the first holistic view on the past, present, and future of eye tracking on handheld mobile devices. To this end, we discuss how research developed from building hardware prototypes, to accurate gaze estimation on unmodified smartphones and tablets. We then discuss implications by laying out 1) novel opportunities, including pervasive advertising and conducting in-the-wild eye tracking studies on handhelds, and 2) new challenges that require further research, such as visibility of the user’s eyes, lighting conditions, and privacy implications. We discuss how these developments shape MobileHCI research in the future, possibly the next 20 years.
@InProceedings{khamis2018mobilehci,
author = {Khamis, Mohamed and Alt, Florian and Bulling, Andreas},
title = {The Past, Present, and Future of Gaze-enabled Handheld Mobile Devices: Survey and Lessons Learned},
booktitle = {Proceedings of the 20th International Conference on Human-Computer Interaction with Mobile Devices and Services},
year = {2018},
series = {MobileHCI '18},
pages = {38:1--38:17},
address = {New York, NY, USA},
publisher = {ACM},
note = {khamis2018mobilehci},
abstract = {While first-generation mobile gaze interfaces required special-purpose hardware, recent advances in computational gaze estimation and the availability of sensor-rich and powerful devices are finally fulfilling the promise of pervasive eye tracking and eye-based interaction on off-the-shelf mobile devices. This work provides the first holistic view on the past, present, and future of eye tracking on handheld mobile devices. To this end, we discuss how research developed from building hardware prototypes, to accurate gaze estimation on unmodified smartphones and tablets. We then discuss implications by laying out 1) novel opportunities, including pervasive advertising and conducting in-the-wild eye tracking studies on handhelds, and 2) new challenges that require further research, such as visibility of the user's eyes, lighting conditions, and privacy implications. We discuss how these developments shape MobileHCI research in the future, possibly the next 20 years.},
acmid = {3229452},
articleno = {38},
doi = {10.1145/3229434.3229452},
isbn = {978-1-4503-5898-9},
keywords = {eye tracking, gaze estimation, gaze interaction, mobile devices, smartphones, tablets},
location = {Barcelona, Spain},
numpages = {17},
timestamp = {2018.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2018mobilehci.pdf},
}
M. Khamis, D. Buschek, T. Thieron, F. Alt, and A. Bulling, “EyePACT: Eye-Based Parallax Correction on Touch-Enabled Interactive Displays,” Proc. ACM Interact. Mob. Wearable Ubiquitous Technol., vol. 1, iss. 4, pp. 146:1–146:18, 2018. doi:10.1145/3161168
[BibTeX] [Abstract] [Download PDF]
The parallax effect describes the displacement between the perceived and detected touch locations on a touch-enabled surface. Parallax is a key usability challenge for interactive displays, particularly for those that require thick layers of glass between the screen and the touch surface to protect them from vandalism. To address this challenge, we present EyePACT, a method that compensates for input error caused by parallax on public displays. Our method uses a display-mounted depth camera to detect the user’s 3D eye position in front of the display and the detected touch location to predict the perceived touch location on the surface. We evaluate our method in two user studies in terms of parallax correction performance as well as multi-user support. Our evaluations demonstrate that EyePACT (1) significantly improves accuracy even with varying gap distances between the touch surface and the display, (2) adapts to different levels of parallax by resulting in significantly larger corrections with larger gap distances, and (3) maintains a significantly large distance between two users’ fingers when interacting with the same object. These findings are promising for the development of future parallax-free interactive displays.
@Article{khamis2018imwut,
author = {Khamis, Mohamed and Buschek, Daniel and Thieron, Tobias and Alt, Florian and Bulling, Andreas},
title = {EyePACT: Eye-Based Parallax Correction on Touch-Enabled Interactive Displays},
journal = {Proc. ACM Interact. Mob. Wearable Ubiquitous Technol.},
year = {2018},
volume = {1},
number = {4},
pages = {146:1--146:18},
month = jan,
issn = {2474-9567},
note = {khamis2018imwut},
abstract = {The parallax effect describes the displacement between the perceived and detected touch locations on a touch-enabled surface. Parallax is a key usability challenge for interactive displays, particularly for those that require thick layers of glass between the screen and the touch surface to protect them from vandalism. To address this challenge, we present EyePACT, a method that compensates for input error caused by parallax on public displays. Our method uses a display-mounted depth camera to detect the user's 3D eye position in front of the display and the detected touch location to predict the perceived touch location on the surface. We evaluate our method in two user studies in terms of parallax correction performance as well as multi-user support. Our evaluations demonstrate that EyePACT (1) significantly improves accuracy even with varying gap distances between the touch surface and the display, (2) adapts to different levels of parallax by resulting in significantly larger corrections with larger gap distances, and (3) maintains a significantly large distance between two users' fingers when interacting with the same object. These findings are promising for the development of future parallax-free interactive displays.},
acmid = {3161168},
address = {New York, NY, USA},
articleno = {146},
doi = {10.1145/3161168},
issue_date = {December 2017},
keywords = {Gaze, Parallax, Public Displays, Touch screens},
numpages = {18},
publisher = {ACM},
timestamp = {2018.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2018imwut.pdf},
}
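The correction described in the abstract above is, at its core, a ray-plane intersection: project the detected touch along the eye-to-touch ray onto the display plane lying a gap behind the touch surface. The following back-of-the-envelope Python sketch illustrates that geometry; the coordinate frame, variable names, and example values are illustrative assumptions, not the paper's implementation.
import numpy as np

def correct_touch(eye, touch, gap):
    # eye: 3D eye position in metres, z measured out of the touch surface.
    # touch: 2D touch point on the touch surface (which sits at z = 0).
    # gap: distance between touch surface and display plane (at z = -gap).
    # Returns the point on the display plane the user was aiming at.
    eye = np.asarray(eye, dtype=float)
    t = np.array([touch[0], touch[1], 0.0])
    ray = t - eye                   # direction from the eye through the touch
    s = (-gap - eye[2]) / ray[2]    # scale factor to reach the display plane
    hit = eye + s * ray
    return hit[:2]

# Eye 60 cm in front of the surface and slightly off to the left;
# 2 cm of protective glass between touch surface and display.
print(correct_touch(eye=(-0.2, 0.0, 0.6), touch=(0.1, 0.0), gap=0.02))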
D. Buschek, M. Hassib, and F. Alt, “Personal Mobile Messaging in Context: Chat Augmentations for Expressiveness and Awareness,” ACM Trans. Comput.-Hum. Interact., vol. 25, iss. 4, pp. 23:1–23:33, 2018. doi:10.1145/3201404
[BibTeX] [Abstract] [Download PDF]
Mobile text messaging is one of the most important communication channels today, but it suffers from lack of expressiveness, context and emotional awareness, compared to face-to-face communication. We address this problem by augmenting text messaging with information about users and contexts. We present and reflect on lessons learned from three field studies, in which we deployed augmentation concepts as prototype chat apps in users’ daily lives. We studied (1) subtly conveying context via dynamic font personalisation (TapScript), (2) integrating and sharing physiological data – namely heart rate – implicitly or explicitly (HeartChat) and (3) automatic annotation of various context cues: music, distance, weather and activities (ContextChat). Based on our studies, we discuss chat augmentation with respect to privacy concerns, understandability, connectedness and inferring context in addition to methodological lessons learned. Finally, we propose a design space for chat augmentation to guide future research, and conclude with practical design implications.
@Article{buschek2018tochi,
author = {Buschek, Daniel and Hassib, Mariam and Alt, Florian},
title = {Personal Mobile Messaging in Context: Chat Augmentations for Expressiveness and Awareness},
journal = {ACM Trans. Comput.-Hum. Interact.},
year = {2018},
volume = {25},
number = {4},
pages = {23:1--23:33},
month = aug,
issn = {1073-0516},
note = {buschek2018tochi},
abstract = {Mobile text messaging is one of the most important communication channels today, but it suffers from lack of expressiveness, context and emotional awareness, compared to face-to-face communication. We address this problem by augmenting text messaging with information about users and contexts. We present and reflect on lessons learned from three field studies, in which we deployed augmentation concepts as prototype chat apps in users’ daily lives. We studied (1) subtly conveying context via dynamic font personalisation (TapScript), (2) integrating and sharing physiological data – namely heart rate – implicitly or explicitly (HeartChat) and (3) automatic annotation of various context cues: music, distance, weather and activities (ContextChat). Based on our studies, we discuss chat augmentation with respect to privacy concerns, understandability, connectedness and inferring context in addition to methodological lessons learned. Finally, we propose a design space for chat augmentation to guide future research, and conclude with practical design implications.},
acmid = {3201404},
address = {New York, NY, USA},
articleno = {23},
doi = {10.1145/3201404},
issue_date = {August 2018},
keywords = {Mobile text messaging, chat context, heart rate, mobile device sensors},
numpages = {33},
publisher = {ACM},
timestamp = {2019.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2018tochi.pdf},
}
N. Müller, B. Eska, R. Schäffer, S. T. Völkel, M. Braun, G. Wiegand, and F. Alt, “Arch’N’Smile: A Jump’N’Run Game Using Facial Expression Recognition Control For Entertaining Children During Car Journeys,” in Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia, New York, NY, USA, 2018, pp. 335–339. doi:10.1145/3282894.3282918
[BibTeX] [Abstract] [Download PDF]
Children can be a distraction to the driver during a car ride. With our work, we try to combine the possibility of facial expression recognition in the car with a game for children. The goal is that the parents can focus on the driving task while the child is busy and entertained. We conducted a study with children and parents in a real driving situation. It turned out that children can handle and enjoy games with facial recognition controls, which leads us to the conclusion that face recognition in the car as an entertaining system for children should be developed further to exploit its full potential.
@InProceedings{mueller2018mum,
author = {M\"{u}ller, Niklas and Eska, Bettina and Sch\"{a}ffer, Richard and V\"{o}lkel, Sarah Theres and Braun, Michael and Wiegand, Gesa and Alt, Florian},
title = {{Arch'N'Smile: A Jump'N'Run Game Using Facial Expression Recognition Control For Entertaining Children During Car Journeys}},
booktitle = {Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia},
year = {2018},
series = {MUM'18},
pages = {335--339},
address = {New York, NY, USA},
publisher = {ACM},
note = {mueller2018mum},
abstract = {Children can be a distraction to the driver during a car ride. With our work, we try to combine the possibility of facial expression recognition in the car with a game for children. The goal is that the parents can focus on the driving task while the child is busy and entertained. We conducted a study with children and parents in a real driving situation. It turned out that children can handle and enjoy games with facial recognition controls, which leads us to the conclusion that face recognition in the car as an entertaining system for children should be developed further to exploit its full potential.},
acmid = {3282918},
doi = {10.1145/3282894.3282918},
isbn = {978-1-4503-6594-9},
keywords = {Children, Distraction, Driving, Entertainment, Face Recognition, Facial Expression, Game},
location = {Cairo, Egypt},
numpages = {5},
timestamp = {2018.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mueller2018mum.pdf},
}
S. Prange, D. Buschek, and F. Alt, “An Exploratory Study on Correlations of Hand Size and Mobile Touch Interactions,” in Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia, New York, NY, USA, 2018, pp. 279–283. doi:10.1145/3282894.3282924
[BibTeX] [Abstract] [Download PDF]
We report on an exploratory study investigating the relationship of users’ hand sizes and aspects of their mobile touch interactions. Estimating hand size from interaction could inform, for example, UI adaptation, occlusion-aware UIs, and biometrics. We recorded touch data from 62 participants performing six touch tasks on a smartphone. Our results reveal considerable correlations between hand size and aspects of touch interaction, both for tasks with unrestricted “natural” postures and restricted hand locations. We discuss implications for applications and ideas for future work.
@InProceedings{prange2018mum,
author = {Prange, Sarah and Buschek, Daniel and Alt, Florian},
title = {An Exploratory Study on Correlations of Hand Size and Mobile Touch Interactions},
booktitle = {Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia},
year = {2018},
series = {MUM'18},
pages = {279--283},
address = {New York, NY, USA},
publisher = {ACM},
note = {prange2018mum},
abstract = {We report on an exploratory study investigating the relationship of users' hand sizes and aspects of their mobile touch interactions. Estimating hand size from interaction could inform, for example, UI adaptation, occlusion-aware UIs, and biometrics. We recorded touch data from 62 participants performing six touch tasks on a smartphone. Our results reveal considerable correlations between hand size and aspects of touch interaction, both for tasks with unrestricted "natural" postures and restricted hand locations. We discuss implications for applications and ideas for future work.},
acmid = {3282924},
doi = {10.1145/3282894.3282924},
isbn = {978-1-4503-6594-9},
keywords = {Correlation, Hand Size, Scrolling, Swiping, Targeting, Touch},
location = {Cairo, Egypt},
numpages = {5},
timestamp = {2018.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/prange2018mum.pdf},
}
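The analysis reported in the abstract above boils down to correlating a hand measurement with per-task touch features. Here is a minimal Python sketch on synthetic stand-in data, assuming Pearson's r via SciPy; the chosen feature, units, and effect size are purely illustrative and not taken from the paper.
import numpy as np
from scipy.stats import pearsonr

rng = np.random.default_rng(42)
n = 62  # matches the study's participant count

hand_size = rng.normal(18.5, 1.5, n)                    # hand length in cm (assumed)
touch_offset = 0.4 * hand_size + rng.normal(0, 1.0, n)  # e.g., mean targeting offset

r, p = pearsonr(hand_size, touch_offset)
print(f"r = {r:.2f}, p = {p:.4f}")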
L. Mecke, K. Pfeuffer, S. Prange, and F. Alt, “Open Sesame!: User Perception of Physical, Biometric, and Behavioural Authentication Concepts to Open Doors,” in Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia, New York, NY, USA, 2018, pp. 153–159. doi:10.1145/3282894.3282923
[BibTeX] [Abstract] [Download PDF]
In usable security (e.g., smartphone authentication), a lot of emphasis is put on low-effort authentication and access concepts. Yet, only very few approaches exist where such concepts are applied beyond digital devices. We investigate and explore seamless authentication systems at doors, where most currently used systems for seamless access rely on the use of tokens. In a Wizard-of-Oz study, we investigate three different authentication schemes, namely (1) key, (2) palm vein scanner and (3) gait-based authentication (compare Fig. 1). Most participants in our study (N=15) preferred the palm vein scanner, while ranking unlocking with a key and gait-based recognition second and third. Our results suggest that recovery costs for a failed authentication attempt have an impact on user perception. Furthermore, while the participants appreciated seamless authentication via biometrics, they also valued the control they gain from the possession of a physical token.
@InProceedings{mecke2018mum,
author = {Mecke, Lukas and Pfeuffer, Ken and Prange, Sarah and Alt, Florian},
title = {Open Sesame!: User Perception of Physical, Biometric, and Behavioural Authentication Concepts to Open Doors},
booktitle = {Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia},
year = {2018},
series = {MUM'18},
pages = {153--159},
address = {New York, NY, USA},
publisher = {ACM},
note = {mecke2018mum},
abstract = {In usable security (e.g., smartphone authentication), a lot of emphasis is put on low-effort authentication and access concepts. Yet, only very few approaches exist where such concepts are applied beyond digital devices. We investigate and explore seamless authentication systems at doors, where most currently used systems for seamless access rely on the use of tokens. In a Wizard-of-Oz study, we investigate three different authentication schemes, namely (1) key, (2) palm vein scanner and (3) gait-based authentication (compare Fig. 1). Most participants in our study (N=15) preferred the palm vein scanner, while ranking unlocking with a key and gait-based recognition second and third. Our results suggest that recovery costs for a failed authentication attempt have an impact on user perception. Furthermore, while the participants appreciated seamless authentication via biometrics, they also valued the control they gain from the possession of a physical token.},
acmid = {3282923},
doi = {10.1145/3282894.3282923},
isbn = {978-1-4503-6594-9},
keywords = {(Behavioural) Biometrics, Authentication, User Perception, Wizard-of-Oz},
location = {Cairo, Egypt},
numpages = {7},
timestamp = {2018.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mecke2018mum.pdf},
}
H. Drewes, M. Khamis, and F. Alt, “Smooth Pursuit Target Speeds and Trajectories,” in Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia, New York, NY, USA, 2018, pp. 139–146. doi:10.1145/3282894.3282913
[BibTeX] [Abstract] [Download PDF]
In this paper we present an investigation of how the speed and trajectory of smooth pursuit targets impact detection rates in gaze interfaces. Previous work optimized these values for the specific application for which smooth pursuit eye movements were employed. However, this may not always be possible. For example, UI designers may want to minimize distraction caused by the stimulus, integrate it with a certain UI element (e.g., a button), or limit it to a certain area of the screen. In these cases an in-depth understanding of the interplay between speed, trajectory, and accuracy is required. To achieve this, we conducted a user study with 15 participants who had to follow targets with different speeds and on different trajectories using their gaze. We evaluated the data with respect to detectability. As a result, we obtained reasonable ranges for target speeds and demonstrate the effects of trajectory shapes. We show that slow moving targets are hard to detect by correlation and that introducing a delay improves the detection rate for fast moving targets. Our research is complemented by design rules which enable designers to implement better pursuit detectors and pursuit-based user interfaces.
@InProceedings{drewes2018mum,
author = {Drewes, Heiko and Khamis, Mohamed and Alt, Florian},
title = {Smooth Pursuit Target Speeds and Trajectories},
booktitle = {Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia},
year = {2018},
series = {MUM'18},
pages = {139--146},
address = {New York, NY, USA},
publisher = {ACM},
note = {drewes2018mum},
abstract = {In this paper we present an investigation of how the speed and trajectory of smooth pursuit targets impact detection rates in gaze interfaces. Previous work optimized these values for the specific application for which smooth pursuit eye movements were employed. However, this may not always be possible. For example, UI designers may want to minimize distraction caused by the stimulus, integrate it with a certain UI element (e.g., a button), or limit it to a certain area of the screen. In these cases an in-depth understanding of the interplay between speed, trajectory, and accuracy is required. To achieve this, we conducted a user study with 15 participants who had to follow targets with different speeds and on different trajectories using their gaze. We evaluated the data with respect to detectability. As a result, we obtained reasonable ranges for target speeds and demonstrate the effects of trajectory shapes. We show that slow moving targets are hard to detect by correlation and that introducing a delay improves the detection rate for fast moving targets. Our research is complemented by design rules which enable designers to implement better pursuit detectors and pursuit-based user interfaces.},
acmid = {3282913},
doi = {10.1145/3282894.3282913},
isbn = {978-1-4503-6594-9},
keywords = {Eye tracking, pursuit detection, smooth pursuits, trajectories},
location = {Cairo, Egypt},
numpages = {8},
timestamp = {2018.11.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/drewes2018mum.pdf},
}
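Since the abstract above discusses detection "by correlation", the following minimal Python sketch shows the widely used correlation-based pursuit detector: the gaze signal is correlated per axis with each candidate target's trajectory over a sliding window, and the best-matching target above a threshold counts as selected. The window length and threshold are illustrative assumptions, not the paper's tuned values.
import numpy as np

def pursuit_score(gaze, target):
    # Pearson correlation between gaze and target, averaged over x and y.
    rx = np.corrcoef(gaze[:, 0], target[:, 0])[0, 1]
    ry = np.corrcoef(gaze[:, 1], target[:, 1])[0, 1]
    return (rx + ry) / 2

def detect(gaze, targets, threshold=0.8):
    # Return the index of the followed target, or None if no target
    # correlates strongly enough with the gaze within this window.
    scores = [pursuit_score(gaze, t) for t in targets]
    best = int(np.argmax(scores))
    return best if scores[best] >= threshold else None

# Toy example: gaze noisily follows a circular target, not a diagonal one.
t = np.linspace(0, 2 * np.pi, 90)                   # ~1.5 s window at 60 Hz
circle = np.stack([np.cos(t), np.sin(t)], axis=1)
diagonal = np.stack([t, -t], axis=1)
gaze = circle + np.random.default_rng(1).normal(0, 0.05, circle.shape)
print(detect(gaze, [circle, diagonal]))             # -> 0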
M. Braun, B. Pfleging, and F. Alt, “A Survey to Understand Emotional Situations on the Road and What They Mean for Affective Automotive UIs,” Multimodal Technologies and Interaction, vol. 2, iss. 4, 2018. doi:10.3390/mti2040075
[BibTeX] [Abstract] [Download PDF]
In this paper, we present the results of an online survey (N = 170) on emotional situations on the road. In particular, we asked potential early adopters to remember a situation where they felt either an intense positive or negative emotion while driving. Our research is motivated by imminent disruptions in the automotive sector due to automated driving and the accompanying switch to selling driving experiences over horsepower. This creates a need to focus on the driver’s emotion when designing in-car interfaces. As a result of our research, we present a set of propositions for affective car interfaces based on real-life experiences. With our work we aim to support the design of affective car interfaces and give designers a foundation to build upon. We find respondents often connect positive emotions with enjoying their independence, while negative experiences are associated mostly with traffic behavior. Participants who experienced negative situations wished for better information management and a higher degree of automation. Drivers with positive emotions generally wanted to experience the situation more genuinely, for example, by switching to a “back-to-basic” mode. We explore these statements and discuss recommendations for the design of affective interfaces in future cars.
@Article{braun2018mdpi,
author = {Braun, Michael and Pfleging, Bastian and Alt, Florian},
title = {A Survey to Understand Emotional Situations on the Road and What They Mean for Affective Automotive UIs},
journal = {Multimodal Technologies and Interaction},
year = {2018},
volume = {2},
number = {4},
issn = {2414-4088},
note = {braun2018mdpi},
abstract = { In this paper, we present the results of an online survey (N = 170) on emotional situations on the road. In particular, we asked potential early adopters to remember a situation where they felt either an intense positive or negative emotion while driving. Our research is motivated by imminent disruptions in the automotive sector due to automated driving and the accompanying switch to selling driving experiences over horsepower. This creates a need to focus on the driver’s emotion when designing in-car interfaces. As a result of our research, we present a set of propositions for affective car interfaces based on real-life experiences. With our work we aim to support the design of affective car interfaces and give designers a foundation to build upon. We find respondents often connect positive emotions with enjoying their independence, while negative experiences are associated mostly with traffic behavior. Participants who experienced negative situations wished for better information management and a higher degree of automation. Drivers with positive emotions generally wanted to experience the situation more genuinely, for example, by switching to a “back-to-basic” mode. We explore these statements and discuss recommendations for the design of affective interfaces in future cars.},
address = {Basel, Switzerland},
article-number = {75},
doi = {10.3390/mti2040075},
publisher = {MDPI},
timestamp = {2018.12.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2018mdpi.pdf},
}
M. Khamis, L. Trotter, V. Mäkelä, E. von Zezschwitz, J. Le, A. Bulling, and F. Alt, “CueAuth: Comparing Touch, Mid-Air Gestures, and Gaze for Cue-Based Authentication on Situated Displays,” Proc. ACM Interact. Mob. Wearable Ubiquitous Technol., vol. 2, iss. 4, pp. 174:1–174:22, 2018. doi:10.1145/3287052
[BibTeX] [Abstract] [Download PDF]
Secure authentication on situated displays (e.g., to access sensitive information or to make purchases) is becoming increasingly important. A promising approach to resist shoulder surfing attacks is to employ cues that users respond to while authenticating; this overwhelms observers by requiring them to observe both the cue itself as well as users’ response to the cue. Although previous work proposed a variety of modalities, such as gaze and mid-air gestures, to further improve security, an understanding of how they compare with regard to usability and security is still missing as of today. In this paper, we rigorously compare modalities for cue-based authentication on situated displays. In particular, we provide the first comparison between touch, mid-air gestures, and calibration-free gaze using a state-of-the-art authentication concept. In two in-depth user studies (N=20, N=17) we found that the choice of touch or gaze presents a clear tradeoff between usability and security. For example, while gaze input is more secure, it is also more demanding and requires longer authentication times. Mid-air gestures are slightly slower and more secure than touch but users hesitate to use them in public. We conclude with three significant design implications for authentication using touch, mid-air gestures, and gaze and discuss how the choice of modality creates opportunities and challenges for improved authentication in public.
@Article{Khamis2018,
author = {Khamis, Mohamed and Trotter, Ludwig and M\"{a}kel\"{a}, Ville and Zezschwitz, Emanuel von and Le, Jens and Bulling, Andreas and Alt, Florian},
title = {CueAuth: Comparing Touch, Mid-Air Gestures, and Gaze for Cue-based Authentication on Situated Displays},
journal = {Proc. ACM Interact. Mob. Wearable Ubiquitous Technol.},
year = {2018},
volume = {2},
number = {4},
pages = {174:1--174:22},
month = dec,
issn = {2474-9567},
note = {khamis2018imwut},
abstract = {Secure authentication on situated displays (e.g., to access sensitive information or to make purchases) is becoming increasingly important. A promising approach to resist shoulder surfing attacks is to employ cues that users respond to while authenticating; this overwhelms observers by requiring them to observe both the cue itself as well as users' response to the cue. Although previous work proposed a variety of modalities, such as gaze and mid-air gestures, to further improve security, an understanding of how they compare with regard to usability and security is still missing as of today. In this paper, we rigorously compare modalities for cue-based authentication on situated displays. In particular, we provide the first comparison between touch, mid-air gestures, and calibration-free gaze using a state-of-the-art authentication concept. In two in-depth user studies (N=20, N=17) we found that the choice of touch or gaze presents a clear tradeoff between usability and security. For example, while gaze input is more secure, it is also more demanding and requires longer authentication times. Mid-air gestures are slightly slower and more secure than touch but users hesitate to use them in public. We conclude with three significant design implications for authentication using touch, mid-air gestures, and gaze and discuss how the choice of modality creates opportunities and challenges for improved authentication in public.},
acmid = {3287052},
address = {New York, NY, USA},
articleno = {174},
doi = {10.1145/3287052},
issue_date = {December 2018},
keywords = {Eye Tracking, Privacy, Public Displays, Pursuits, SwiPIN},
numpages = {22},
publisher = {ACM},
timestamp = {2018.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2018imwut.pdf},
}
L. Mecke, S. Prange, D. Buschek, M. Khamis, M. Hassib, and F. Alt, “‘Outsourcing’ Security: Supporting People to Support Older Adults,” in Proceedings of the Mobile HCI ’18 Workshop on Mobile Privacy and Security for an Aging Population, 2018.
[BibTeX] [Abstract] [Download PDF]
Older adults often rely on the support of trusted individuals (e.g., younger family members) when performing complex tasks on their mobile devices, such as configuring privacy settings. However, a prominent problem is that systems are designed with the intention of a single “main user” using them, with little to no support for cases where the user would like to get external help from others. In this work, we provide anecdotal evidence of problems faced by supporters who try to help older adults in privacy and security related tasks. We outline multiple suggestions for future work in this area, and discuss how systems can support people who support older adults.
@InProceedings{mecke2018mobilehciadj,
author = {Lukas Mecke AND Sarah Prange AND Daniel Buschek AND Mohamed Khamis AND Mariam Hassib AND Florian Alt},
title = {{“Outsourcing” Security: Supporting People to Support Older Adults}},
booktitle = {{Proceedings of the Mobile HCI ’18 Workshop on Mobile Privacy and Security for an Aging Population}},
year = {2018},
note = {mecke2018mobilehciadj},
abstract = {Older adults often rely on the support of trusted individuals (e.g., younger family members) when performing complex tasks on their mobile devices, such as configuring privacy settings. However, a prominent problem is that systems are designed with the intention of a single “main user” using them, with little to no support for cases where the user would like to get external help from others. In this work, we provide anecdotal evidence of problems faced by supporters who try to help older adults in privacy and security related tasks. We outline multiple suggestions for future work in this area, and discuss how systems can support people who support older adults.},
owner = {florian},
timestamp = {2018.08.31},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mecke2018mobilehciadj.pdf},
}
M. Braun, S. Weiser, B. Pfleging, and F. Alt, “A Comparison of Emotion Elicitation Methods for Affective Driving Studies,” in Proceedings of the 10th International Conference on Automotive User Interfaces and Interactive Vehicular Applications, New York, NY, USA, 2018, pp. 77–81. doi:10.1145/3239092.3265945
[BibTeX] [Abstract] [Download PDF]
Advances in sensing technology enable the emotional state of car drivers to be captured and interfaces to be built that respond to these emotions. To evaluate such emotion-aware interfaces, researchers need to evoke certain emotional states within participants. Emotion elicitation in driving studies poses a challenge as the driving task can interfere with the elicitation task. Induced emotions also lose intensity with time and through secondary tasks. This is why we have analyzed different emotion elicitation techniques for their suitability in automotive research and compared the most promising approaches in a user study. We recommend using autobiographical recollection to induce emotions in driving studies, and suggest a way to prolong emotional states with music playback. We discuss experiences from a driving simulator study, including solutions for addressing potential privacy issues.
@InProceedings{braun2018autouiadj1,
author = {Braun, Michael and Weiser, Simon and Pfleging, Bastian and Alt, Florian},
title = {A Comparison of Emotion Elicitation Methods for Affective Driving Studies},
booktitle = {Proceedings of the 10th International Conference on Automotive User Interfaces and Interactive Vehicular Applications},
year = {2018},
series = {AutomotiveUI '18},
pages = {77--81},
address = {New York, NY, USA},
publisher = {ACM},
note = {braun2018autouiadj1},
abstract = {Advances in sensing technology enable the emotional state of car drivers to be captured and interfaces to be built that respond to these emotions. To evaluate such emotion-aware interfaces, researchers need to evoke certain emotional states within participants. Emotion elicitation in driving studies poses a challenge as the driving task can interfere with the elicitation task. Induced emotions also lose intensity with time and through secondary tasks. This is why we have analyzed different emotion elicitation techniques for their suitability in automotive research and compared the most promising approaches in a user study. We recommend using autobiographical recollection to induce emotions in driving studies, and suggest a way to prolong emotional states with music playback. We discuss experiences from a driving simulator study, including solutions for addressing potential privacy issues.},
acmid = {3265945},
doi = {10.1145/3239092.3265945},
isbn = {978-1-4503-5947-4},
keywords = {Affective Computing, Driving Studies, Emotion Elicitation},
location = {Toronto, ON, Canada},
numpages = {5},
timestamp = {2018.10.05},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2018autouiadj1.pdf},
}
M. Braun, F. Roider, F. Alt, and T. Gross, “Automotive Research in the Public Space: Towards Deployment-Based Prototypes for Real Users,” in Proceedings of the 10th International Conference on Automotive User Interfaces and Interactive Vehicular Applications, New York, NY, USA, 2018, pp. 181–185. doi:10.1145/3239092.3265964
[BibTeX] [Abstract] [Download PDF]
Many automotive user studies allow users to experience and evaluate interactive concepts. They are, however, often limited to small and specific groups of participants, such as students or experts. This might limit the generalizability of results for future users. A possible solution is to allow a large group of unbiased users to actively experience an interactive prototype and generate new ideas, but there is little experience about the realization and benefits of such an approach. We placed an interactive prototype in a public space and gathered objective and subjective data from 693 participants over the course of three months. We found a high variance in data quality and identified resulting restrictions for suitable research questions. From this we derive concrete requirements for hardware, software, and analytics, e.g. the need for assessing data quality, and give examples of how this approach lets users explore a system and give first-contact feedback, which differs strongly from common in-depth expert analyses.
@InProceedings{braun2018autouiadj2,
author = {Braun, Michael and Roider, Florian and Alt, Florian and Gross, Tom},
title = {Automotive Research in the Public Space: Towards Deployment-Based Prototypes For Real Users},
booktitle = {Proceedings of the 10th International Conference on Automotive User Interfaces and Interactive Vehicular Applications},
year = {2018},
series = {AutomotiveUI '18},
pages = {181--185},
address = {New York, NY, USA},
publisher = {ACM},
note = {braun2018autouiadj2},
abstract = {Many automotive user studies allow users to experience and evaluate interactive concepts. They are, however, often limited to small and specific groups of participants, such as students or experts. This might limit the generalizability of results for future users. A possible solution is to allow a large group of unbiased users to actively experience an interactive prototype and generate new ideas, but there is little experience about the realization and benefits of such an approach. We placed an interactive prototype in a public space and gathered objective and subjective data from 693 participants over the course of three months. We found a high variance in data quality and identified resulting restrictions for suitable research questions. From this we derive concrete requirements for hardware, software, and analytics, e.g. the need for assessing data quality, and give examples of how this approach lets users explore a system and give first-contact feedback, which differs strongly from common in-depth expert analyses.},
acmid = {3265964},
doi = {10.1145/3239092.3265964},
isbn = {978-1-4503-5947-4},
keywords = {Automotive UI, Deployment, Prototypes, User Studies},
location = {Toronto, ON, Canada},
numpages = {5},
timestamp = {2018.10.05},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2018autouiadj2.pdf},
}
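The entry above stresses the need for assessing data quality in unsupervised public deployments. As a purely illustrative sketch (not from the paper; all field names and thresholds are hypothetical), logged sessions might be screened like this before analysis:

# Hypothetical screening of logged sessions from a public deployment.
# Field names and thresholds are illustrative assumptions, not the paper's.
def is_usable(session, min_duration_s=30, min_interactions=3):
    """Keep only sessions that are complete, long, and active enough."""
    if not all(k in session for k in ("duration_s", "interactions")):
        return False
    return (session["duration_s"] >= min_duration_s
            and session["interactions"] >= min_interactions)

sessions = [
    {"duration_s": 12, "interactions": 1},  # drive-by glance, discarded
    {"duration_s": 95, "interactions": 7},  # genuine use, kept
]
print(sum(is_usable(s) for s in sessions), "of", len(sessions), "sessions usable")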
M. Braun, A. Frison, S. T. Völkel, F. Alt, H. Hussmann, and A. Riener, “Beyond transportation: how to keep users attached when they are neither driving nor owning automated cars?,” in Proceedings of the 10th international conference on automotive user interfaces and interactive vehicular applications, New York, NY, USA, 2018, p. 175–180. doi:10.1145/3239092.3265963
[BibTeX] [Abstract] [Download PDF]
The way drivers relate to cars is likely bound to change with the rise of automated vehicles and new ownership models. However, personal relationships towards products are an important part of buying decisions. Car manufacturers thus need to provide novel bonding experiences for their future customers in order to stay competitive. We introduce a vehicle attachment model based on related work from other domains. In interviews with 16 car owners we verify the approach as promising and derive four attachment types by applying the model: interviewees’ personal attachments were grounded on either self-empowering reasons, memories with the car, increased status, or a loving friendship towards their car. We propose how to address the needs of these four attachment types as a first step towards emotionally irreplaceable automated and shared vehicles.
@InProceedings{braun2018autouiadj3,
author = {Braun, Michael and Frison, Anna-Katharina and V\"{o}lkel, Sarah Theres and Alt, Florian and Hussmann, Heinrich and Riener, Andreas},
title = {Beyond Transportation: How to Keep Users Attached When They Are Neither Driving nor Owning Automated Cars?},
booktitle = {Proceedings of the 10th International Conference on Automotive User Interfaces and Interactive Vehicular Applications},
year = {2018},
series = {AutomotiveUI '18},
pages = {175--180},
address = {New York, NY, USA},
publisher = {ACM},
note = {braun2018autouiadj3},
abstract = {The way drivers relate to cars is likely bound to change with the rise of automated vehicles and new ownership models. However, personal relationships towards products are an important part of buying decisions. Car manufacturers thus need to provide novel bonding experiences for their future customers in order to stay competitive. We introduce a vehicle attachment model based on related work from other domains. In interviews with 16 car owners we verify the approach as promising and derive four attachment types by applying the model: interviewees' personal attachments were grounded on either self-empowering reasons, memories with the car, increased status, or a loving friendship towards their car. We propose how to address the needs of these four attachment types as a first step towards emotionally irreplaceable automated and shared vehicles.},
acmid = {3265963},
doi = {10.1145/3239092.3265963},
isbn = {978-1-4503-5947-4},
keywords = {Automated Cars, Car Sharing, Vehicle Attachment},
location = {Toronto, ON, Canada},
numpages = {6},
timestamp = {2018.10.05},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2018autouiadj3.pdf},
}
M. Braun, S. T. Völkel, G. Wiegand, T. Puls, D. Steidl, Y. Weiß, and F. Alt, “The smile is the new like: controlling music with facial expressions to minimize driver distraction,” in Proceedings of the 17th international conference on mobile and ubiquitous multimedia, New York, NY, USA, 2018, p. 383–389. doi:10.1145/3282894.3289729
[BibTeX] [Abstract] [Download PDF]
The control of user interfaces while driving is a textbook example for driver distraction. Modern in-car interfaces are growing in complexity and visual demand, yet they need to stay simple enough to handle while driving. One common approach to solving this problem is the use of multimodal interfaces, incorporating e.g. touch, speech, and mid-air gestures for the control of distinct features. This allows for an optimization of used cognitive resources and can relieve the driver of potential overload. We introduce a novel modality for in-car interaction: our system allows drivers to use facial expressions to control a music player. The results of a user study show that both implicit emotion recognition and explicit facial expressions are applicable for music control in cars. Subconscious emotion recognition could decrease distraction, while explicit expressions can be used as an alternative input modality. A simple smiling gesture showed good potential, e.g. to save favorite songs.
@InProceedings{braun2018mumadj,
author = {Braun, Michael and V\"{o}lkel, Sarah Theres and Wiegand, Gesa and Puls, Thomas and Steidl, Daniel and Wei\ss, Yannick and Alt, Florian},
title = {The Smile is The New Like: Controlling Music with Facial Expressions to Minimize Driver Distraction},
booktitle = {Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia},
year = {2018},
series = {MUM 2018},
pages = {383--389},
address = {New York, NY, USA},
publisher = {ACM},
note = {braun2018mumadj},
abstract = {The control of user interfaces while driving is a textbook example for driver distraction. Modern in-car interfaces are growing in complexity and visual demand, yet they need to stay simple enough to handle while driving. One common approach to solving this problem is the use of multimodal interfaces, incorporating e.g. touch, speech, and mid-air gestures for the control of distinct features. This allows for an optimization of used cognitive resources and can relieve the driver of potential overload. We introduce a novel modality for in-car interaction: our system allows drivers to use facial expressions to control a music player. The results of a user study show that both implicit emotion recognition and explicit facial expressions are applicable for music control in cars. Subconscious emotion recognition could decrease distraction, while explicit expressions can be used as an alternative input modality. A simple smiling gesture showed good potential, e.g. to save favorite songs.},
acmid = {3289729},
doi = {10.1145/3282894.3289729},
isbn = {978-1-4503-6594-9},
keywords = {Affective Computing, Automotive User Interfaces, Driver Distraction, Face Recognition, Multimodal Interaction},
location = {Cairo, Egypt},
numpages = {7},
timestamp = {2018.11.28},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2018mumadj.pdf},
}
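A minimal sketch of the explicit-expression modality described above: a held smile is mapped to saving the current song. The per-frame expression labels, the frame threshold, and the player interface are hypothetical stand-ins for the paper's face-recognition pipeline.

# Hypothetical mapping from per-frame expression labels to a music-player
# action; classifier output and player are stubs, the threshold is illustrative.
SMILE_HOLD_FRAMES = 15  # require a deliberate smile (~0.5 s at 30 fps)

def control_music(frame_labels, player):
    """Save the current song once a smile has been held long enough."""
    streak = 0
    for label in frame_labels:
        streak = streak + 1 if label == "smile" else 0
        if streak == SMILE_HOLD_FRAMES:
            player.save_current_song()  # the explicit "like" gesture

class StubPlayer:
    def save_current_song(self):
        print("Song saved to favorites")

control_music(["neutral"] * 5 + ["smile"] * 20, StubPlayer())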
M. Kattenbeck, M. A. Kilian, M. Ferstl, F. Alt, and B. Ludwig, “Airbot: using a work flow model for proactive assistance in public spaces,” in Proceedings of the 20th international conference on human-computer interaction with mobile devices and services adjunct, New York, NY, USA, 2018, p. 213–220. doi:10.1145/3236112.3236142
[BibTeX] [Download PDF]
@InProceedings{kattenbeck2018mobilehciadj,
author = {Kattenbeck, Markus and Kilian, Melanie A. and Ferstl, Matthias and Alt, Florian and Ludwig, Bernd},
title = {Airbot: Using a Work Flow Model for Proactive Assistance in Public Spaces},
booktitle = {Proceedings of the 20th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct},
year = {2018},
series = {MobileHCI '18},
pages = {213--220},
address = {New York, NY, USA},
publisher = {ACM},
note = {kattenbeck2018mobilehciadj},
acmid = {3236142},
doi = {10.1145/3236112.3236142},
isbn = {978-1-4503-5941-2},
keywords = {assistance system, cooperative problem solving, human-computer interaction, mobile information needs},
location = {Barcelona, Spain},
numpages = {8},
timestamp = {2018.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/kattenbeck2018mobilehciadj},
}
M. Khamis, A. Kienle, F. Alt, and A. Bulling, “Gazedrone: mobile eye-based interaction in public space without augmenting the user,” in 4th acm workshop on micro aerial vehicle networks, systems, and applications (dronet ’18), New York, NY, USA, 2018, p. 66–71. doi:10.1145/3213526.3213539
[BibTeX] [Abstract] [Download PDF]
Gaze interaction holds a lot of promise for seamless human-computer interaction. At the same time, current wearable mobile eye trackers require user augmentation that negatively impacts natural user behavior while remote trackers require users to position themselves within a confined tracking range. We present GazeDrone, the first system that combines a camera-equipped aerial drone with a computational method to detect sidelong glances for spontaneous (calibration-free) gaze-based interaction with surrounding pervasive systems (e.g., public displays). GazeDrone does not require augmenting each user with on-body sensors and allows interaction from arbitrary positions, even while moving. We demonstrate that drone-supported gaze interaction is feasible and accurate for certain movement types. It is well-perceived by users, in particular while interacting from a fixed position as well as while moving orthogonally or diagonally to a display. We present design implications and discuss opportunities and challenges for drone-supported gaze interaction in public.
@InProceedings{khamis2018dronet,
author = {Mohamed Khamis and Anna Kienle and Florian Alt and Andreas Bulling},
title = {GazeDrone: Mobile Eye-Based Interaction in Public Space Without Augmenting the User},
booktitle = {4th ACM Workshop on Micro Aerial Vehicle Networks, Systems, and Applications (DroNet '18)},
year = {2018},
series = {DroNet'18},
pages = {66--71},
address = {New York, NY, USA},
month = {June},
publisher = {ACM},
note = {khamis2018dronet},
abstract = {Gaze interaction holds a lot of promise for seamless human-computer interaction. At the same time, current wearable mobile eye trackers require user augmentation that negatively impacts natural user behavior while remote trackers require users to position themselves within a confined tracking range. We present GazeDrone, the first system that combines a camera-equipped aerial drone with a computational method to detect sidelong glances for spontaneous (calibration-free) gaze-based interaction with surrounding pervasive systems (e.g., public displays). GazeDrone does not require augmenting each user with on-body sensors and allows interaction from arbitrary positions, even while moving. We demonstrate that drone-supported gaze interaction is feasible and accurate for certain movement types. It is well-perceived by users, in particular while interacting from a fixed position as well as while moving orthogonally or diagonally to a display. We present design implications and discuss opportunities and challenges for drone-supported gaze interaction in public.},
doi = {10.1145/3213526.3213539},
keywords = {Active eye tracking, drones, gaze interaction, UAV.},
timestamp = {2018.09.05},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2018dronet},
}
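The sidelong-glance detection above is camera-based and calibration-free. A toy sketch of the underlying geometric idea (the inputs and split points are assumptions, not the paper's method): classify gaze from the pupil's normalized horizontal position within the detected eye region.

# Toy sidelong-glance classification from pupil position within the eye
# region; the 0.35/0.65 split points are illustrative assumptions.
def glance_direction(pupil_x, eye_left_x, eye_right_x):
    """Classify gaze as left/center/right from the normalized pupil offset."""
    t = (pupil_x - eye_left_x) / (eye_right_x - eye_left_x)  # 0..1 across eye
    if t < 0.35:
        return "left"
    if t > 0.65:
        return "right"
    return "center"

print(glance_direction(pupil_x=21, eye_left_x=10, eye_right_x=50))  # "left"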
R. Poguntke, C. Tasci, O. Korhonen, F. Alt, and S. Schneegass, “Avotar: exploring personalized avatars for mobile interaction with public displays,” in Proceedings of the 20th international conference on human-computer interaction with mobile devices and services adjunct, New York, NY, USA, 2018, p. 1–8. doi:10.1145/3236112.3236113
[BibTeX] [Abstract] [Download PDF]
Engaging users with public displays has been a major challenge in public display research. Interactive displays often suffer from being ignored by potential users. Research showed that user representations are a valid way to partially address this challenge, e.g., by attracting attention, conveying interactivity, and serving as entry points to gestures and touch interaction. We believe that user representations, particularly personalized avatars, could further increase the attractiveness of public displays, if carefully designed. In this work, we provide first insights on how such avatars can be designed and which properties are important for users. In particular, we present AVotar, a voting application for mobiles that lets users design avatars being utilized to represent them. In a user study we found that users appreciate high degrees of freedom in customization and focus on expressive facial features. Finally, we discuss the findings yielding useful implications for designers of future public display applications employing avatars.
@InProceedings{poguntke2018mobilehciadj,
author = {Poguntke, Romina and Tasci, Cagri and Korhonen, Olli and Alt, Florian and Schneegass, Stefan},
title = {AVotar: Exploring Personalized Avatars for Mobile Interaction with Public Displays},
booktitle = {Proceedings of the 20th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct},
year = {2018},
series = {MobileHCI '18},
pages = {1--8},
address = {New York, NY, USA},
publisher = {ACM},
note = {poguntke2018mobilehciadj},
abstract = {Engaging users with public displays has been a major challenge in public display research. Interactive displays often suffer from being ignored by potential users. Research showed that user representations are a valid way to partially address this challenge, e.g., by attracting attention, conveying interactivity, and serving as entry points to gestures and touch interaction. We believe that user representations, particularly personalized avatars, could further increase the attractiveness of public displays, if carefully designed. In this work, we provide first insights on how such avatars can be designed and which properties are important for users. In particular, we present AVotar, a voting application for mobiles that lets users design avatars being utilized to represent them. In a user study we found that users appreciate high degrees of freedom in customization and focus on expressive facial features. Finally, we discuss the findings yielding useful implications for designers of future public display applications employing avatars.},
acmid = {3236113},
doi = {10.1145/3236112.3236113},
isbn = {978-1-4503-5941-2},
keywords = {avatars, engagement, personalization, public displays, user representation},
location = {Barcelona, Spain},
numpages = {8},
timestamp = {2018.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/poguntke2018mobilehciadj},
}
L. Trotter, S. Prange, M. Khamis, N. Davies, and F. Alt, “Design considerations for secure and usable authentication on situated displays,” in Proceedings of the 17th international conference on mobile and ubiquitous multimedia, New York, NY, USA, 2018, p. 483–490. doi:10.1145/3282894.3289743
[BibTeX] [Abstract] [Download PDF]
Users often need to authenticate at situated displays in order to, for example, make purchases, access sensitive information, or confirm an identity. However, the exposure of interactions in public spaces introduces a large attack surface (e.g., observation, smudge or thermal attacks). A plethora of authentication models and input modalities that aim at disguising users’ input has been presented in the past. However, a comprehensive analysis on the requirements for secure and usable authentication on public displays is still missing. This work presents 13 design considerations suitable to inform practitioners and researchers during the development process of authentication systems for situated displays in public spaces. It draws on a comprehensive analysis of prior literature and subsequent discussion with five experts in the fields of pervasive displays, human-computer interaction and usable security.
@InProceedings{trotter2018mumadj,
author = {Trotter, Ludwig and Prange, Sarah and Khamis, Mohamed and Davies, Nigel and Alt, Florian},
title = {Design Considerations for Secure and Usable Authentication on Situated Displays},
booktitle = {Proceedings of the 17th International Conference on Mobile and Ubiquitous Multimedia},
year = {2018},
series = {MUM 2018},
pages = {483--490},
address = {New York, NY, USA},
publisher = {ACM},
note = {trotter2018mumadj},
abstract = {Users often need to authenticate at situated displays in order to, for example, make purchases, access sensitive information, or confirm an identity. However, the exposure of interactions in public spaces introduces a large attack surface (e.g., observation, smudge or thermal attacks). A plethora of authentication models and input modalities that aim at disguising users' input has been presented in the past. However, a comprehensive analysis on the requirements for secure and usable authentication on public displays is still missing. This work presents 13 design considerations suitable to inform practitioners and researchers during the development process of authentication systems for situated displays in public spaces. It draws on a comprehensive analysis of prior literature and subsequent discussion with five experts in the fields of pervasive displays, human-computer interaction and usable security.},
acmid = {3289743},
doi = {10.1145/3282894.3289743},
isbn = {978-1-4503-6594-9},
keywords = {Authentication, Design Considerations, Input Modalities, Public Displays, User Interface Design},
location = {Cairo, Egypt},
numpages = {8},
timestamp = {2018.11.28},
url = {http://www.florian-alt.org/unibw/wp-content/publications/trotter2018mumadj.pdf},
}
D. Weber, A. Voit, G. Kollotzek, L. van der Vekens, M. Hepting, F. Alt, and N. Henze, “Pd notify: investigating personal content on public displays,” in Extended abstracts of the 2018 chi conference on human factors in computing systems, New York, NY, USA, 2018, p. LBW014:1–LBW014:6. doi:10.1145/3170427.3188475
[BibTeX] [Abstract] [Download PDF]
Public displays are becoming more and more ubiquitous. Current public displays are mainly used as general information displays or to display advertisements. How personal content should be shown is still an important research topic. In this paper, we present PD Notify, a system that mirrors a user’s pending smartphone notifications on nearby public displays. Notifications are an essential part of current smartphones and inform users about various events, such as new messages, pending updates, personalized news, and upcoming appointments. PD Notify implements privacy settings to control what is shown on the public displays. We conducted an in-situ study in a semi-public work environment for three weeks with seven participants. The results of this first deployment show that displaying personal content on public displays is not only feasible but also valued by users. Participants quickly settled for privacy settings that work for all kinds of content. While they liked the system, they did not want to spend time configuring it.
@InProceedings{weber2018chiea,
author = {Weber, Dominik and Voit, Alexandra and Kollotzek, Gisela and van der Vekens, Lucas and Hepting, Marcus and Alt, Florian and Henze, Niels},
title = {PD Notify: Investigating Personal Content on Public Displays},
booktitle = {Extended Abstracts of the 2018 CHI Conference on Human Factors in Computing Systems},
year = {2018},
series = {CHI EA '18},
pages = {LBW014:1--LBW014:6},
address = {New York, NY, USA},
publisher = {ACM},
note = {weber2018chiea},
abstract = {Public displays are becoming more and more ubiquitous. Current public displays are mainly used as general information displays or to display advertisements. How personal content should be shown is still an important research topic. In this paper, we present PD Notify, a system that mirrors a user's pending smartphone notifications on nearby public displays. Notifications are an essential part of current smartphones and inform users about various events, such as new messages, pending updates, personalized news, and upcoming appointments. PD Notify implements privacy settings to control what is shown on the public displays. We conducted an in-situ study in a semi-public work environment for three weeks with seven participants. The results of this first deployment show that displaying personal content on public displays is not only feasible but also valued by users. Participants quickly settled for privacy settings that work for all kinds of content. While they liked the system, they did not want to spend time configuring it.},
acmid = {3188475},
articleno = {LBW014},
doi = {10.1145/3170427.3188475},
isbn = {978-1-4503-5621-3},
keywords = {notifications, pervasive, privacy, public displays},
location = {Montreal QC, Canada},
numpages = {6},
timestamp = {2018.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/weber2018chiea.pdf},
}
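PD Notify's core mechanism is a per-category privacy policy deciding how much of a notification is mirrored. A minimal sketch under assumed category names and policy values (the paper's actual settings model may differ):

# Hypothetical per-category privacy policy for mirroring notifications to a
# nearby public display; categories and policy names are assumptions.
POLICY = {
    "message":  "show_sender_only",  # hide the body in public
    "calendar": "show_full",
    "email":    "hide",
}

def render_for_display(notification):
    action = POLICY.get(notification["category"], "hide")  # default: private
    if action == "hide":
        return None
    if action == "show_sender_only":
        return f"New message from {notification['sender']}"
    return f"{notification['title']}: {notification['body']}"

n = {"category": "message", "sender": "Alex", "title": "Hi", "body": "secret"}
print(render_for_display(n))  # -> "New message from Alex"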

2017

Y. Abdelrahman, M. Khamis, S. Schneegass, and F. Alt, “Stay cool! understanding thermal attacks on mobile-based user authentication,” in Proceedings of the 35th annual acm conference on human factors in computing systems, New York, NY, USA, 2017. doi:10.1145/3025453.3025461
[BibTeX] [Abstract] [Download PDF]
PINs and patterns remain among the most widely used knowledge-based authentication schemes. As thermal cameras become ubiquitous and affordable, we foresee a new form of threat to user privacy on mobile devices. Thermal cameras allow performing thermal attacks, where heat traces, resulting from authentication, can be used to reconstruct passwords. In this work we investigate in detail the viability of exploiting thermal imaging to infer PINs and patterns on mobile devices. We present a study (N=18) where we evaluated how properties of PINs and patterns influence their resistance to thermal attacks. We found that thermal attacks are indeed viable on mobile devices; overlapping patterns significantly decrease the thermal attack success rate from 100% to 16.67%, while PINs remain vulnerable (>72% success rate) even with duplicate digits. We conclude with recommendations for users and designers of authentication schemes on how to resist thermal attacks.
@InProceedings{abdelrahman2017chi,
author = {Abdelrahman, Yomna and Khamis, Mohamed and Schneegass, Stefan and Alt, Florian},
title = {Stay Cool! Understanding Thermal Attacks on Mobile-based User Authentication},
booktitle = {Proceedings of the 35th Annual ACM Conference on Human Factors in Computing Systems},
year = {2017},
series = {CHI '17},
address = {New York, NY, USA},
publisher = {ACM},
note = {abdelrahman2017chi},
abstract = {PINs and patterns remain among the most widely used knowledge-based authentication schemes. As thermal cameras become ubiquitous and affordable, we foresee a new form of threat to user privacy on mobile devices. Thermal cameras allow performing thermal attacks, where heat traces, resulting from authentication, can be used to reconstruct passwords. In this work we investigate in detail the viability of exploiting thermal imaging to infer PINs and patterns on mobile devices. We present a study (N=18) where we evaluated how properties of PINs and patterns influence their resistance to thermal attacks. We found that thermal attacks are indeed viable on mobile devices; overlapping patterns significantly decrease the thermal attack success rate from 100% to 16.67%, while PINs remain vulnerable (>72% success rate) even with duplicate digits. We conclude with recommendations for users and designers of authentication schemes on how to resist thermal attacks.},
doi = {10.1145/3025453.3025461},
location = {Denver, CO, USA},
timestamp = {2017.05.12},
url = {http://www.florian-alt.org/unibw/wp-content/publications/abdelrahman2017chi.pdf},
}
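The attack above exploits the fact that heat traces cool over time: keys pressed later are warmer. A toy sketch of that core inference (temperatures invented), which also shows why duplicate digits and overlapping strokes weaken the attack, since they merge traces:

# Toy thermal-attack core: sort observed key temperatures coldest-to-warmest
# to recover entry order. Works only for a PIN without repeated digits;
# duplicates leave a single merged trace. Temperatures are invented.
observed = {"3": 29.1, "7": 30.4, "1": 28.2, "9": 31.0}  # key -> deg C

recovered_pin = "".join(sorted(observed, key=observed.get))
print(recovered_pin)  # "1379": coldest (pressed first) to warmest (last)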
M. Al Sada, M. Khamis, A. Kato, S. Sugano, T. Nakajima, and F. Alt, “Challenges and opportunities of supernumerary robotic limbs,” in Proceedings of the chi 2017 workshop on amplification and augmentation of human perception (amplify 2017), New York, NY, USA, 2017.
[BibTeX] [Abstract] [Download PDF]
Recent advancements in robotics and wearables made it possible to augment humans with additional robotic limbs (e.g., extra pair of arms). However, these advances have been dispersed among different research communities with very little attention to the user’s perspective. In this work we take a first step to close this gap. We report on the results of two focus groups that uncovered expectations and concerns of potential users of Supernumerary Robotic Limbs (SRLs). There is a wide range of applications for SRLs within daily usage contexts, like enabling new perceptions, commuting and communication methods as well as enhancing existing ones. Yet, several requirements need to be met before SRLs can be widely adopted, such as multipurpose design and adequate sensory feedback. We discuss how these findings influence the design of future SRLs.
@InProceedings{alsada2017amplify,
author = {Al Sada, Mohammed and Khamis, Mohamed and Kato, Akira and Sugano, Shigeki and Nakajima, Tatsuo and Alt, Florian},
title = {Challenges and Opportunities of Supernumerary Robotic Limbs},
booktitle = {Proceedings of the CHI 2017 Workshop on Amplification and Augmentation of Human Perception (Amplify 2017)},
year = {2017},
series = {Amplify '17},
address = {New York, NY, USA},
note = {alsada2017amplify},
abstract = {Recent advancements in robotics and wearables made it possible to augment humans with additional robotic limbs (e.g., extra pair of arms). However, these advances have been dispersed among different research communities with very little attention to the user’s perspective. In this work we take a first step to close this gap. We report on the results of two focus groups that uncovered expectations and concerns of potential users of Supernumerary Robotic Limbs (SRLs). There is a wide range of applications for SRLs within daily usage contexts, like enabling new perceptions, commuting and communication methods as well as enhancing existing ones. Yet, several requirements need to be met before SRLs can be widely adopted, such as multipurpose design and adequate sensory feedback. We discuss how these findings influence the design of future SRLs.},
location = {Denver, CO, USA},
timestamp = {2017.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alsada2017amplify.pdf},
}
D. Buschek and F. Alt, “Probui: generalising touch target representations to enable declarative gesture definition for probabilistic guis,” in Proceedings of the 2017 chi conference on human factors in computing systems, New York, NY, USA, 2017, p. 4640–4653. doi:10.1145/3025453.3025502
[BibTeX] [Abstract] [Download PDF]
We present ProbUI, a mobile touch GUI framework that merges ease of use of declarative gesture definition with the benefits of probabilistic reasoning. It helps developers to handle uncertain input and implement feedback and GUI adaptations. ProbUI replaces today’s static target models (bounding boxes) with probabilistic gestures (“bounding behaviours”). It is the first touch GUI framework to unite concepts from three areas of related work: 1) Developers declaratively define touch behaviours for GUI targets. As a key insight, the declarations imply simple probabilistic models (HMMs with 2D Gaussian emissions). 2) ProbUI derives these models automatically to evaluate users’ touch sequences. 3) It then infers intended behaviour and target. Developers bind callbacks to gesture progress, completion, and other conditions. We show ProbUI’s value by implementing existing and novel widgets, and report developer feedback from a survey and a lab study.
@InProceedings{buschek2017chi,
author = {Buschek, Daniel and Alt, Florian},
title = {ProbUI: Generalising Touch Target Representations to Enable Declarative Gesture Definition for Probabilistic GUIs},
booktitle = {Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems},
year = {2017},
series = {CHI '17},
pages = {4640--4653},
address = {New York, NY, USA},
publisher = {ACM},
note = {buschek2017chi},
abstract = {We present ProbUI, a mobile touch GUI framework that merges ease of use of declarative gesture definition with the benefits of probabilistic reasoning. It helps developers to handle uncertain input and implement feedback and GUI adaptations. ProbUI replaces today's static target models (bounding boxes) with probabilistic gestures ("bounding behaviours"). It is the first touch GUI framework to unite concepts from three areas of related work: 1) Developers declaratively define touch behaviours for GUI targets. As a key insight, the declarations imply simple probabilistic models (HMMs with 2D Gaussian emissions). 2) ProbUI derives these models automatically to evaluate users' touch sequences. 3) It then infers intended behaviour and target. Developers bind callbacks to gesture progress, completion, and other conditions. We show ProbUI's value by implementing existing and novel widgets, and report developer feedback from a survey and a lab study.},
acmid = {3025502},
doi = {10.1145/3025453.3025502},
isbn = {978-1-4503-4655-9},
keywords = {gui framework, probabilistic modelling, touch gestures},
location = {Denver, Colorado, USA},
numpages = {14},
timestamp = {2017.05.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2017chi.pdf},
}
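ProbUI's key move, per the abstract above, is replacing bounding-box hit tests with probabilistic models (HMMs with 2D Gaussian emissions). A minimal sketch of just the emission part, with invented targets: score a touch under each target's axis-aligned 2D Gaussian and pick the most likely one.

# Minimal sketch of the Gaussian-emission idea: score a touch under each
# target's axis-aligned 2D Gaussian instead of a hard bounding-box test.
# Targets and values are invented; the full system models touch sequences as HMMs.
def gaussian_log_score(touch, target):
    """Unnormalized log-likelihood of a touch under a target's 2D Gaussian."""
    (x, y), (cx, cy, sx, sy) = touch, target
    return -(((x - cx) / sx) ** 2 + ((y - cy) / sy) ** 2) / 2

targets = {"play": (100, 200, 20, 20), "stop": (160, 200, 20, 20)}
touch = (118, 204)  # an ambiguous press between the two buttons
best = max(targets, key=lambda t: gaussian_log_score(touch, targets[t]))
print(best)  # "play" -- the likelier target in Gaussian terms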
M. Eiband, M. Khamis, E. von Zezschwitz, H. Hussmann, and F. Alt, “Understanding shoulder surfing in the wild: stories from users and observers,” in Proceedings of the 2017 chi conference on human factors in computing systems, New York, NY, USA, 2017, p. 4254–4265. doi:10.1145/3025453.3025636
[BibTeX] [Abstract] [Download PDF]
Research has brought forth a variety of authentication systems to mitigate observation attacks. However, there is little work about shoulder surfing situations in the real world. We present the results of a user survey (N=174) in which we investigate actual stories about shoulder surfing on mobile devices from both users and observers. Our analysis indicates that shoulder surfing mainly occurs in an opportunistic, non-malicious way. It usually does not have serious consequences, but evokes negative feelings for both parties, resulting in a variety of coping strategies. Observed data was personal in most cases and ranged from information about interests and hobbies to login data and intimate details about third persons and relationships. Thus, our work contributes evidence for shoulder surfing in the real world and informs implications for the design of privacy protection mechanisms.
@InProceedings{eiband2017chi,
author = {Eiband, Malin and Khamis, Mohamed and von Zezschwitz, Emanuel and Hussmann, Heinrich and Alt, Florian},
title = {Understanding Shoulder Surfing in the Wild: Stories from Users and Observers},
booktitle = {Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems},
year = {2017},
series = {CHI '17},
pages = {4254--4265},
address = {New York, NY, USA},
publisher = {ACM},
note = {eiband2017chi},
abstract = {Research has brought forth a variety of authentication systems to mitigate observation attacks. However, there is little work about shoulder surfing situations in the real world. We present the results of a user survey (N=174) in which we investigate actual stories about shoulder surfing on mobile devices from both users and observers. Our analysis indicates that shoulder surfing mainly occurs in an opportunistic, non-malicious way. It usually does not have serious consequences, but evokes negative feelings for both parties, resulting in a variety of coping strategies. Observed data was personal in most cases and ranged from information about interests and hobbies to login data and intimate details about third persons and relationships. Thus, our work contributes evidence for shoulder surfing in the real world and informs implications for the design of privacy protection mechanisms.},
acmid = {3025636},
doi = {10.1145/3025453.3025636},
isbn = {978-1-4503-4655-9},
keywords = {mobile devices, privacy, shoulder surfing},
location = {Denver, Colorado, USA},
numpages = {12},
timestamp = {2017.05.12},
url = {http://www.florian-alt.org/unibw/wp-content/publications/eiband2017chi.pdf},
}
V. Gentile, M. Khamis, S. Sorce, and F. Alt, “They are looking at me! understanding how audience presence impacts on public display users,” in Proceedings of the 6th international symposium on pervasive displays, New York, NY, USA, 2017. doi:10.1145/3078810.3078822
[BibTeX] [Abstract] [Download PDF]
It is well known from prior work that people interacting as well as attending to a public display attract further people to interact. This behavior is commonly referred to as the honeypot effect. At the same time, there are often situations where an audience is present in the vicinity of a public display that does not actively engage or pay attention to the display or an approaching user. However, it is largely unknown how such a passive audience impacts on users or people who intend to interact. In this paper, we investigate the influence of a passive audience on the engagement of people with a public display. In more detail, we report on the deployment of a display in a public space. We collected and analyzed video logs to understand how people react to a passive audience in the vicinity of public displays. We found an influence on where interacting users position themselves relative to both display and passive audience as well as on their behavior. Our findings are valuable for display providers and space owners who want to maximize the display’s benefits.
@InProceedings{gentile2017perdis,
author = {Gentile, Vito and Khamis, Mohamed and Sorce, Salvatore and Alt, Florian},
title = {They are looking at me! Understanding how Audience Presence Impacts on Public Display Users},
booktitle = {Proceedings of the 6th International Symposium on Pervasive Displays},
year = {2017},
series = {PerDis '17},
address = {New York, NY, USA},
publisher = {ACM},
note = {gentile2017perdis},
abstract = {It is well known from prior work that people interacting as well as attending to a public display attract further people to interact. This behavior is commonly referred to as the honeypot effect. At the same time, there are often situations where an audience is present in the vicinity of a public display that does not actively engage or pay attention to the display or an approaching user. However, it is largely unknown how such a passive audience impacts on users or people who intend to interact. In this paper, we investigate the influence of a passive audience on the engagement of people with a public display. In more detail, we report on the deployment of a display in a public space. We collected and analyzed video logs to understand how people react to a passive audience in the vicinity of public displays. We found an influence on where interacting users position themselves relative to both display and passive audience as well as on their behavior. Our findings are valuable for display providers and space owners who want to maximize the display’s benefits.},
acmid = {3078822},
doi = {10.1145/3078810.3078822},
isbn = {978-1-4503-5045-7/17/06},
location = {Lugano, Switzerland},
numpages = {9},
timestamp = {2017.06.06},
url = {http://www.florian-alt.org/unibw/wp-content/publications/gentile2017perdis.pdf},
}
C. George, M. Khamis, M. Burger, H. Schmidt, F. Alt, and H. Hussmann, “Seamless and secure vr: adapting and evaluating established authentication systems for virtual reality,” in Proceedings of the usable security mini conference 2017, San Diego, CA, USA, 2017.
[BibTeX] [Abstract] [Download PDF]
Virtual reality (VR) headsets are enabling a wide range of new opportunities for the user. For example, in the near future users may be able to visit virtual shopping malls and virtually join international conferences. These and many other scenarios pose new questions with regards to privacy and security, in particular authentication of users within the virtual environment. As a first step towards seamless VR authentication, this paper investigates the direct transfer of well-established concepts (PIN, Android unlock patterns) into VR. In a pilot study (N = 5) and a lab study (N = 25), we adapted existing mechanisms and evaluated their usability and security for VR. The results indicate that both PINs and patterns are well suited for authentication in VR. We found that the usability of both methods matched the performance known from the physical world. In addition, the private visual channel makes authentication harder to observe, indicating that authentication in VR using traditional concepts already achieves a good balance in the trade-off between usability and security. The paper contributes to a better understanding of authentication within VR environments, by providing the first investigation of established authentication methods within VR, and presents the base layer for the design of future authentication schemes, which are used in VR environments only.
@InProceedings{george2017usec,
author = {Ceenu George AND Mohamed Khamis AND Marinus Burger AND Henri Schmidt AND Florian Alt AND Heinrich Hussmann},
title = {Seamless and Secure VR: Adapting and Evaluating Established Authentication Systems for Virtual Reality},
booktitle = {Proceedings of the Usable Security Mini Conference 2017},
year = {2017},
address = {San Diego, CA, USA},
publisher = {Internet Society},
note = {george2017usec},
abstract = {Virtual reality (VR) headsets are enabling a wide range of new opportunities for the user. For example, in the near future users may be able to visit virtual shopping malls and virtually join international conferences. These and many other scenarios pose new questions with regards to privacy and security, in particular authentication of users within the virtual environment. As a first step towards seamless VR authentication, this paper investigates the direct transfer of well-established concepts (PIN, Android unlock patterns) into VR. In a pilot study (N = 5) and a lab study (N = 25), we adapted existing mechanisms and evaluated their usability and security for VR. The results indicate that both PINs and patterns are well suited for authentication in VR. We found that the usability of both methods matched the performance known from the physical world. In addition, the private visual channel makes authentication harder to observe, indicating that authentication in VR using traditional concepts already achieves a good balance in the trade-off between usability and security. The paper contributes to a better understanding of authentication within VR environments, by providing the first investigation of established authentication methods within VR, and presents the base layer for the design of future authentication schemes, which are used in VR environments only.},
owner = {florian},
timestamp = {2017.02.26},
url = {http://www.florian-alt.org/unibw/wp-content/publications/george2017usec.pdf},
}
M. Hassib, D. Buschek, P. W. Wozniak, and F. Alt, “Heartchat: heart rate augmented mobile chat to support empathy and awareness,” in Proceedings of the 2017 chi conference on human factors in computing systems, New York, NY, USA, 2017, p. 2239–2251. doi:10.1145/3025453.3025758
[BibTeX] [Abstract] [Download PDF]
Textual communication via mobile phones suffers from a lack of context and emotional awareness. We present a mobile chat application, HeartChat, which integrates heart rate as a cue to increase awareness and empathy. Through a literature review and a focus group, we identified design dimensions important for heart rate augmented chats. We created three concepts showing heart rate per message, in real-time, or sending it explicitly. We tested our system in a two week in-the-wild study with 14 participants (7 pairs). Interviews and questionnaires showed that HeartChat supports empathy between people, in particular close friends and partners. Sharing heart rate helped them to implicitly understand each other’s context (e.g. location, physical activity) and emotional state, and sparked curiosity on special occasions. We discuss opportunities, challenges, and design implications for enriching mobile chats with physiological sensing.
@InProceedings{hassib2017chi1,
author = {Hassib, Mariam and Buschek, Daniel and Wozniak, Pawe\l W. and Alt, Florian},
title = {HeartChat: Heart Rate Augmented Mobile Chat to Support Empathy and Awareness},
booktitle = {Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems},
year = {2017},
series = {CHI '17},
pages = {2239--2251},
address = {New York, NY, USA},
publisher = {ACM},
note = {hassib2017chi1},
abstract = {Textual communication via mobile phones suffers from a lack of context and emotional awareness. We present a mobile chat application, HeartChat, which integrates heart rate as a cue to increase awareness and empathy. Through a literature review and a focus group, we identified design dimensions important for heart rate augmented chats. We created three concepts showing heart rate per message, in real-time, or sending it explicitly. We tested our system in a two week in-the-wild study with 14 participants (7 pairs). Interviews and questionnaires showed that HeartChat supports empathy between people, in particular close friends and partners. Sharing heart rate helped them to implicitly understand each other's context (e.g. location, physical activity) and emotional state, and sparked curiosity on special occasions. We discuss opportunities, challenges, and design implications for enriching mobile chats with physiological sensing.},
acmid = {3025758},
doi = {10.1145/3025453.3025758},
isbn = {978-1-4503-4655-9},
keywords = {affective computing, heart rate, instant messaging, physiological sensing},
location = {Denver, Colorado, USA},
numpages = {13},
timestamp = {2017.05.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/hassib2017chi1.pdf},
}
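Of HeartChat's three concepts, the per-message one is the simplest to illustrate: each outgoing message is stamped with the sender's current heart rate. A sketch with a stubbed sensor and invented field names:

# Sketch of the per-message HeartChat concept: stamp outgoing messages with
# the sender's heart rate. The sensor is stubbed; field names are assumptions.
import time

def read_heart_rate_bpm():
    return 72  # stub for a wearable or camera-based sensor

def send_message(text):
    return {"text": text, "hr_bpm": read_heart_rate_bpm(), "ts": time.time()}

def render(msg):
    return f"{msg['text']}  [{msg['hr_bpm']} bpm]"

print(render(send_message("On my way!")))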
M. Hassib, M. Pfeiffer, S. Schneegass, M. Rohs, and F. Alt, “Emotion actuator: embodied emotional feedback through electroencephalography and electrical muscle stimulation,” in Proceedings of the 2017 chi conference on human factors in computing systems, New York, NY, USA, 2017, p. 6133–6146. doi:10.1145/3025453.3025953
[BibTeX] [Abstract] [Download PDF]
The human body reveals emotional and bodily states through measurable signals, such as body language and electroencephalography. However, such manifestations are difficult to communicate to others remotely. We propose EmotionActuator, a proof-of-concept system to investigate the transmission of emotional states in which the recipient performs emotional gestures to understand and interpret the state of the sender. We call this kind of communication embodied emotional feedback, and present a prototype implementation. To realize our concept we chose four emotional states: amused, sad, angry, and neutral. We designed EmotionActuator through a series of studies to assess emotional classification via EEG, and create an EMS gesture set by comparing composed gestures from the literature to sign-language gestures. In a final study with the end-to-end prototype, interviews revealed that participants like implicit sharing of emotions and find the embodied output to be immersive, but want to have control over shared emotions and with whom. This work contributes a proof of concept system and set of design recommendations for designing embodied emotional feedback systems.
@InProceedings{hassib2017chi3,
author = {Hassib, Mariam and Pfeiffer, Max and Schneegass, Stefan and Rohs, Michael and Alt, Florian},
title = {Emotion Actuator: Embodied Emotional Feedback Through Electroencephalography and Electrical Muscle Stimulation},
booktitle = {Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems},
year = {2017},
series = {CHI '17},
pages = {6133--6146},
address = {New York, NY, USA},
publisher = {ACM},
note = {hassib2017chi3},
abstract = {The human body reveals emotional and bodily states through measurable signals, such as body language and electroencephalography. However, such manifestations are difficult to communicate to others remotely. We propose EmotionActuator, a proof-of-concept system to investigate the transmission of emotional states in which the recipient performs emotional gestures to understand and interpret the state of the sender. We call this kind of communication embodied emotional feedback, and present a prototype implementation. To realize our concept we chose four emotional states: amused, sad, angry, and neutral. We designed EmotionActuator through a series of studies to assess emotional classification via EEG, and create an EMS gesture set by comparing composed gestures from the literature to sign-language gestures. In a final study with the end-to-end prototype, interviews revealed that participants like implicit sharing of emotions and find the embodied output to be immersive, but want to have control over shared emotions and with whom. This work contributes a proof of concept system and set of design recommendations for designing embodied emotional feedback systems.},
acmid = {3025953},
doi = {10.1145/3025453.3025953},
isbn = {978-1-4503-4655-9},
keywords = {affect display, affective computing, eeg., emotion, emotion sharing, ems},
location = {Denver, Colorado, USA},
numpages = {14},
timestamp = {2017.05.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/hassib2017chi3.pdf},
}
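The pipeline above classifies a sender's emotion from EEG and plays it back as an EMS gesture on the recipient. A sketch of only the final mapping step, with invented channel/intensity values and a consent flag reflecting the participants' wish to control what is shared:

# Sketch of EmotionActuator's output mapping: a classified emotion label
# selects an EMS gesture pattern. Channels and intensities are invented;
# actual EMS hardware control is stubbed out with print().
EMS_GESTURES = {
    "amused":  [("forearm", 0.4), ("biceps", 0.2)],
    "sad":     [("shoulder", 0.3)],
    "angry":   [("forearm", 0.7)],
    "neutral": [],
}

def actuate(emotion, consent=True):
    """Drive the recipient's EMS channels for a classified emotion."""
    if not consent:  # participants wanted control over sharing
        return
    for channel, intensity in EMS_GESTURES.get(emotion, []):
        print(f"EMS {channel} at {intensity:.0%}")

actuate("amused")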
M. Hassib, S. Schneegass, P. Eiglsperger, N. Henze, A. Schmidt, and F. Alt, “Engagemeter: a system for implicit audience engagement sensing using electroencephalography,” in Proceedings of the 2017 chi conference on human factors in computing systems, New York, NY, USA, 2017, p. 5114–5119. doi:10.1145/3025453.3025669
[BibTeX] [Abstract] [Download PDF]
Obtaining information about audience engagement in presentations is a valuable asset for presenters in many domains. Prior literature mostly utilized explicit methods of collecting feedback which induce distractions, add workload on audience and do not provide objective information to presenters. We present EngageMeter – a system that allows fine-grained information on audience engagement to be obtained implicitly from multiple brain-computer interfaces (BCI) and to be fed back to presenters for real time and post-hoc access. Through evaluation during an HCI conference (Naudience=11, Npresenters=3) we found that EngageMeter provides value to presenters (a) in real-time, since it allows reacting to current engagement scores by changing tone or adding pauses, and (b) in post-hoc, since presenters can adjust their slides and embed extra elements. We discuss how EngageMeter can be used in collocated and distributed audience sensing as well as how it can aid presenters in long term use.
@InProceedings{hassib2017chi2,
author = {Hassib, Mariam and Schneegass, Stefan and Eiglsperger, Philipp and Henze, Niels and Schmidt, Albrecht and Alt, Florian},
title = {EngageMeter: A System for Implicit Audience Engagement Sensing Using Electroencephalography},
booktitle = {Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems},
year = {2017},
series = {CHI '17},
pages = {5114--5119},
address = {New York, NY, USA},
publisher = {ACM},
note = {hassib2017chi2},
abstract = {Obtaining information about audience engagement in presentations is a valuable asset for presenters in many domains. Prior literature mostly utilized explicit methods of collecting feedback which induce distractions, add workload on audience and do not provide objective information to presenters. We present EngageMeter - a system that allows fine-grained information on audience engagement to be obtained implicitly from multiple brain-computer interfaces (BCI) and to be fed back to presenters for real time and post-hoc access. Through evaluation during an HCI conference (Naudience=11, Npresenters=3) we found that EngageMeter provides value to presenters (a) in real-time, since it allows reacting to current engagement scores by changing tone or adding pauses, and (b) in post-hoc, since presenters can adjust their slides and embed extra elements. We discuss how EngageMeter can be used in collocated and distributed audience sensing as well as how it can aid presenters in long term use.},
acmid = {3025669},
doi = {10.1145/3025453.3025669},
isbn = {978-1-4503-4655-9},
keywords = {audience feedback, bci, eeg, physiological sensing},
location = {Denver, Colorado, USA},
numpages = {6},
timestamp = {2017.05.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/hassib2017chi2.pdf},
}
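EngageMeter aggregates implicit engagement scores from multiple BCIs. One common way to compute such a score in the literature is the beta/(alpha+theta) band-power index; using it here is an assumption for illustration, not necessarily the paper's exact metric:

# Illustrative engagement score: the classic beta / (alpha + theta)
# band-power index, averaged over the audience. Not necessarily
# EngageMeter's metric; band powers are invented.
def engagement_index(alpha, beta, theta):
    """Higher beta relative to alpha+theta is read as higher engagement."""
    return beta / (alpha + theta)

audience = [(4.0, 6.2, 3.1), (5.5, 4.8, 4.0)]  # (alpha, beta, theta) per person
room = sum(engagement_index(a, b, t) for a, b, t in audience) / len(audience)
print(f"room-level engagement: {room:.2f}")  # fed back to the presenter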
D. Huber, D. Buschek, and F. Alt, “Don’t leave: combining sensing technology and second screens to enhance the user experience with tv content,” in Proceedings of the 2017 acm international conference on interactive experiences for tv and online video, New York, NY, USA, 2017, p. 115–121. doi:10.1145/3077548.3077561
[BibTeX] [Abstract] [Download PDF]
In this paper we explore how the use of sensing technologies can enhance people’s experience when perceiving TV content. The work is motivated by an increasing number of sensors (such as Kinect) that find their way into living rooms. Such sensors allow the behavior of viewers to be analyzed, hence providing the opportunity to instantly react to this behavior. The particular idea we explore in our work is how a second screen app triggered by the viewer’s behavior can be designed to make them re-engage with the TV content. At the outset of our work we conducted a survey (N=411) to assess viewers’ activities while watching TV. Based on the findings we implemented a Kinect-based system to detect these activities and connected it with a playful second screen app. We then conducted a field evaluation (N=20) where we compared (a) four hints to direct users’ attention to the second screen app and (b) four types of second screen content requiring different levels of engagement. We conclude with implications for both practitioners and researchers concerned with interactive TV.
@InProceedings{huber2017tvx,
author = {Huber, Daniela and Buschek, Daniel and Alt, Florian},
title = {Don't Leave: Combining Sensing Technology and Second Screens to Enhance the User Experience with TV Content},
booktitle = {Proceedings of the 2017 ACM International Conference on Interactive Experiences for TV and Online Video},
year = {2017},
series = {TVX '17},
pages = {115--121},
address = {New York, NY, USA},
publisher = {ACM},
note = {huber2017tvx},
abstract = {In this paper we explore how the use of sensing technologies can enhance people's experience when perceiving TV content. The work is motivated by an increasing number of sensors (such as Kinect) that find their way into living rooms. Such sensors allow the behavior of viewers to be analyzed, hence providing the opportunity to instantly react to this behavior. The particular idea we explore in our work is how a second screen app triggered by the viewer's behavior can be designed to make them re-engage with the TV content. At the outset of our work we conducted a survey (N=411) to assess viewers' activities while watching TV. Based on the findings we implemented a Kinect-based system to detect these activities and connected it with a playful second screen app. We then conducted a field evaluation (N=20) where we compared (a) four hints to direct users' attention to the second screen app and (b) four types of second screen content requiring different levels of engagement. We conclude with implications for both practitioners and researchers concerned with interactive TV.},
acmid = {3077561},
doi = {10.1145/3077548.3077561},
isbn = {978-1-4503-4529-3},
keywords = {advertisements, interactive tv, kinect, user behavior},
location = {Hilversum, The Netherlands},
numpages = {7},
timestamp = {2017.05.24},
url = {http://www.florian-alt.org/unibw/wp-content/publications/huber2017tvx.pdf},
}
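The system above reacts to sensed viewer behavior by cueing a second-screen app. A toy sketch of that trigger logic, with activity labels standing in for the Kinect-based detector and an invented hint:

# Toy trigger: when a sensed activity signals disengagement from the TV,
# cue the second screen. Labels and hint text are invented stand-ins.
DISENGAGED = {"using_phone", "talking", "left_room"}

def on_activity(activity, second_screen):
    if activity in DISENGAGED:
        second_screen.show_hint("Quiz about this scene on your tablet!")

class StubScreen:
    def show_hint(self, text):
        print("second screen:", text)

on_activity("using_phone", StubScreen())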
M. Khamis, R. Hasholzner, A. Bulling, and F. Alt, “Gtmopass: two-factor authentication on public displays using gazetouch passwords and personal mobile devices,” in Proceedings of the 6th international symposium on pervasive displays, New York, NY, USA, 2017. doi:10.1145/3078810.3078815
[BibTeX] [Abstract] [Download PDF]
As public displays continue to deliver increasingly private and personalized content, there is a need to ensure that only the legitimate users can access private information in sensitive contexts. While public displays can adopt similar authentication concepts like those used on public terminals (e.g., ATMs), authentication in public is subject to a number of risks. Namely, adversaries can uncover a user’s password through (1) shoulder surfing, (2) thermal attacks, or (3) smudge attacks. To address this problem we propose GTmoPass, an authentication architecture that enables multi-factor user authentication on public displays. The first factor is a knowledge-factor: we employ a shoulder-surfing resilient multimodal scheme that combines gaze and touch input for password entry. The second factor is a possession-factor: users utilize their personal mobile devices, on which they enter the password. Credentials are securely transmitted to a server via Bluetooth beacons. We describe the implementation of GTmoPass and report on an evaluation of its usability and security, which shows that although authentication using GTmoPass is slightly slower than traditional methods, it protects against the three aforementioned threats.
@InProceedings{khamis2017perdis,
author = {Khamis, Mohamed and Hasholzner, Regina and Bulling, Andreas and Alt, Florian},
title = {GTmoPass: Two-factor Authentication on Public Displays Using GazeTouch passwords and Personal Mobile Devices},
booktitle = {Proceedings of the 6th International Symposium on Pervasive Displays},
year = {2017},
series = {PerDis '17},
address = {New York, NY, USA},
publisher = {ACM},
note = {khamis2017perdis},
abstract = {As public displays continue to deliver increasingly private and personalized content, there is a need to ensure that only the legitimate users can access private information in sensitive contexts. While public displays can adopt similar authentication concepts like those used on public terminals (e.g., ATMs), authentication in public is subject to a number of risks. Namely, adversaries can uncover a user's password through (1) shoulder surfing, (2) thermal attacks, or (3) smudge attacks. To address this problem we propose GTmoPass, an authentication architecture that enables multi-factor user authentication on public displays. The first factor is a knowledge-factor: we employ a shoulder-surfing resilient multimodal scheme that combines gaze and touch input for password entry. The second factor is a possession-factor: users utilize their personal mobile devices, on which they enter the password. Credentials are securely transmitted to a server via Bluetooth beacons. We describe the implementation of GTmoPass and report on an evaluation of its usability and security, which shows that although authentication using GTmoPass is slightly slower than traditional methods, it protects against the three aforementioned threats.},
acmid = {3078815},
doi = {10.1145/3078810.3078815},
isbn = {978-1-4503-5045-7/17/06},
location = {Lugano, Switzerland},
numpages = {9},
owner = {florian},
timestamp = {2017.05.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2017perdis.pdf},
}
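To make the two-factor idea above concrete, here is a minimal Python sketch (not the paper's implementation; all names, the wire format, and the HMAC-based device check are illustrative assumptions) of how a display-side verifier might combine the knowledge factor (the gaze+touch password) with the possession factor (a key held by the enrolled mobile device):

import hashlib
import hmac

# Illustrative sketch, not GTmoPass itself: accept a login only if the
# password matches (knowledge factor) AND the message carries a valid
# MAC from the enrolled device's key (possession factor), e.g. relayed
# to the server via a Bluetooth beacon channel.
REGISTERED_DEVICE_KEYS = {"alice-phone": b"device-shared-secret"}  # hypothetical enrolment data
PASSWORD_HASHES = {"alice": hashlib.sha256(b"gazeL-touch3-gazeR-touch7").hexdigest()}

def verify(user: str, device_id: str, password_events: str, tag: str) -> bool:
    key = REGISTERED_DEVICE_KEYS.get(device_id)
    if key is None:
        return False  # unknown device: possession factor fails
    expected = hmac.new(key, password_events.encode(), hashlib.sha256).hexdigest()
    if not hmac.compare_digest(expected, tag):
        return False  # message was not authenticated by the enrolled device
    entered = hashlib.sha256(password_events.encode()).hexdigest()
    return hmac.compare_digest(entered, PASSWORD_HASHES.get(user, ""))

A real deployment would additionally need per-user salting, rate limiting, and replay protection on the beacon channel.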
S. Oberhuber, T. Kothe, S. Schneegass, and F. Alt, “Augmented games: exploring design opportunities in AR settings with children,” in Proceedings of the 2017 conference on interaction design and children, New York, NY, USA, 2017, p. 371–377. doi:10.1145/3078072.3079734
[BibTeX] [Abstract] [Download PDF]
In this paper we investigate how Augmented Reality (AR) technology influences children during creative content generation in playful settings. The work is motivated by the recent spread of AR and the fact that children get in touch with this technology through their smart phones very early on. To understand the consequences, we implemented an app for smart mobile devices that allows children to create treasure hunts using GPS coordinates and marker-based AR functionality. During a qualitative user study, we asked students (n=27) to create traditional (paper + art supplies) and digital (paper + art supplies + AR app) treasure hunts and compared the resulting games, among other metrics, in terms of complexity, length and types of media used. Whereas traditional treasure hunts were linear, centered around locations and delivered information with text only, digital treasure hunts were more complex, focused on visual aspects and frequently integrated storytelling.
@InProceedings{oberhuber2017idc,
author = {Oberhuber, Sascha and Kothe, Tina and Schneegass, Stefan and Alt, Florian},
title = {Augmented Games: Exploring Design Opportunities in AR Settings With Children},
booktitle = {Proceedings of the 2017 Conference on Interaction Design and Children},
year = {2017},
series = {IDC '17},
pages = {371--377},
address = {New York, NY, USA},
publisher = {ACM},
note = {oberhuber2017idc},
abstract = {In this paper we investigate how Augmented Reality (AR) technology influences children during creative content generation in playful settings. The work is motivated by the recent spread of AR and the fact that children get in touch with this technology through their smart phones very early on. To understand the consequences, we implemented an app for smart mobile devices that allows children to create treasure hunts using GPS coordinates and marker-based AR functionality. During a qualitative user study, we asked students (n=27) to create traditional (paper + art supplies) and digital (paper + art supplies + AR app) treasure hunts and compared the resulting games, among other metrics, in terms of complexity, length and types of media used. Whereas traditional treasure hunts were linear, centered around locations and delivered information with text only, digital treasure hunts were more complex, focused on visual aspects and frequently integrated storytelling.},
acmid = {3079734},
doi = {10.1145/3078072.3079734},
isbn = {978-1-4503-4921-5},
keywords = {AR, children, creativity, education, storytelling},
location = {Stanford, California, USA},
numpages = {7},
timestamp = {2017.05.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/oberhuber2017idc.pdf},
}
R. Linke, T. Kothe, and F. Alt, “TaBooGa: a hybrid learning app to support children’s reading motivation,” in Proceedings of the 2017 conference on interaction design and children, New York, NY, USA, 2017, p. 278–285. doi:10.1145/3078072.3079712
[BibTeX] [Abstract] [Download PDF]
In this paper we present TaBooGa (Tangible Book Game), a hybrid learning application we developed to increase children’s reading motivation. As children are exposed to digital devices early on (e.g., smart phones and tablets), weak readers are particularly apt to prefer digital offerings over reading traditional books. Prior work has shown that ebooks can partially address this challenge by making reading more compelling for children. In this work we show that augmenting ebooks with tangible elements can further increase reading motivation. In particular, we embed tangible elements that allow for navigating through the book, as well as mini-games that interlace the reading task. We report on the results of an evaluation among 22 primary school pupils, comparing the influence of the approach on both strong and weak readers. Our results show a positive influence beyond reading motivation on both weak and strong readers. Yet, the approach requires striking a balance between making the tangible elements motivating and not letting them become too distracting.
@InProceedings{linke2017idc,
author = {Linke, Rebecca and Kothe, Tina and Alt, Florian},
title = {TaBooGa: A Hybrid Learning App to Support Children's Reading Motivation},
booktitle = {Proceedings of the 2017 Conference on Interaction Design and Children},
year = {2017},
series = {IDC '17},
pages = {278--285},
address = {New York, NY, USA},
publisher = {ACM},
note = {linke2017idc},
abstract = {In this paper we present TaBooGa (Tangible Book Game), a hybrid learning application we developed to increase children's reading motivation. As children are exposed to digital devices early on (e.g., smart phones and tablets), weak readers are particularly apt to prefer digital offerings over reading traditional books. Prior work has shown that ebooks can partially address this challenge by making reading more compelling for children. In this work we show that augmenting ebooks with tangible elements can further increase reading motivation. In particular, we embed tangible elements that allow for navigating through the book, as well as mini-games that interlace the reading task. We report on the results of an evaluation among 22 primary school pupils, comparing the influence of the approach on both strong and weak readers. Our results show a positive influence beyond reading motivation on both weak and strong readers. Yet, the approach requires striking a balance between making the tangible elements motivating and not letting them become too distracting.},
acmid = {3079712},
doi = {10.1145/3078072.3079712},
isbn = {978-1-4503-4921-5},
keywords = {book-app, hybrid, literature, motivation, reading, tangible},
location = {Stanford, California, USA},
numpages = {8},
timestamp = {2017.05.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/linke2017idc.pdf},
}
M. Koch and F. Alt, “Allgegenwärtige Mensch-Computer-Interaktion,” Informatik-Spektrum, p. 1–6, 2017. doi:10.1007/s00287-017-1027-4
[BibTeX] [Abstract] [Download PDF]
Computers pervade our everyday lives. They are embedded into our environment to such an extent that we no longer perceive them as computers. This creates the need to design immediately understandable user interfaces, both for individual users and for groups of users. Human-computer interaction is the subfield of computer science concerned with this challenge. This article provides an insight into the research activities on this topic at the Munich universities. The focus is on work on public displays, gaze interaction in public space, and the development of authentication schemes that are both secure and usable.
@Article{koch2017informatikspektrum,
author = {Koch, Michael and Alt, Florian},
title = {Allgegenw{\"a}rtige Mensch-Computer-Interaktion},
journal = {Informatik-Spektrum},
year = {2017},
pages = {1--6},
issn = {1432-122X},
note = {koch2017informatikspektrum},
abstract = {Computer durchdringen unseren Alltag. Dabei sind diese derart in unsere Umgebung eingebettet, dass diese von uns nicht mehr als solche wahrgenommen werden. Hierdurch entsteht die Notwendigkeit zur Schaffung unmittelbar verst{\"a}ndlicher Benutzerschnittstellen -- sowohl f{\"u}r Individuen als auch f{\"u}r Gruppen von Benutzern. Mit diesem Teilbereich der Informatik besch{\"a}ftigt sich die Mensch-Maschine-Interaktion. Dieser Artikel gibt einen Einblick in die Forschungsaktivit{\"a}ten zu diesem Thema an den M{\"u}nchner Universit{\"a}ten. Im Fokus stehen hierbei Arbeiten zu {\"o}ffentlichen Bildschirmen, Blickinteraktion im {\"o}ffentlichen Raum sowie die Entwicklung sicherer und gleichzeitig benutzbarer Authentifizierungsverfahren. },
doi = {10.1007/s00287-017-1027-4},
timestamp = {2017.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/koch2017informatikspektrum.pdf},
}
O. Duerr, M. Khamis, D. Buschek, and F. Alt, “HelpMe: assisting older adults in performing tasks on mobile devices,” in Proceedings of the CHI 2017 workshop on designing mobile interactions for the ageing populations, New York, NY, USA, 2017.
[BibTeX] [Abstract] [Download PDF]
Although mobile devices are becoming more ubiquitous, older adults have trouble catching up with the dynamics of technological innovation in smartphones. Most custom solutions for them rely on a proprietary UI with an extenuated number of interaction possibilities. While these solutions do help with basic tasks such as calling the right person, many of the benefits of having a smartphone are clearly dislodged. We introduce and evaluate a prototype, HelpMe, for older adult users who want to use more demanding apps without external assistance. Through a prestudy we uncovered a set of behaviors that imply that the user needs assistance. By detecting these behaviors, or upon manual request, HelpMe overlays information that explains to the user what can be done on the current screen and what the different UI symbols resemble. We evaluated HelpMe in a subsequent study where we collected feedback and measured the workload. Our findings show that older adult users would benefit from HelpMe, and that it reduces the perceived workload.
@InProceedings{duerr2017olderadults,
author = {Duerr, Oliver and Khamis, Mohamed and Buschek, Daniel and Alt, Florian},
title = {HelpMe: Assisting Older Adults in Performing Tasks on Mobile Devices},
booktitle = {Proceedings of the CHI 2017 Workshop on Designing Mobile Interactions for the Ageing Populations},
year = {2017},
address = {New York, NY, USA},
note = {duerr2017olderadults},
abstract = {Although mobile devices are becoming more ubiquitous, older adults have trouble catching up with the dynamics of technological innovation in smartphones. Most custom solutions for them rely on a proprietary UI with an extenuated number of interaction possibilities. While these solutions do help with basic tasks such as calling the right person, many of the benefits of having a smartphone are clearly dislodged. We introduce and evaluate a prototype, HelpMe, for older adult users who want to use more demanding apps without external assistance. Through a prestudy we uncovered a set of behaviors that imply that the user needs assistance. By detecting these behaviors, or upon manual request, HelpMe overlays information that explains to the user what can be done on the current screen and what the different UI symbols resemble. We evaluated HelpMe in a subsequent study where we collected feedback and measured the workload. Our findings show that older adult users would benefit from HelpMe, and that it reduces the perceived workload.},
location = {Denver, CO, USA},
timestamp = {2017.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/duerr2017olderadults.pdf},
}
M. Koch and F. Alt, “Allgegenwärtige Mensch-Computer-Interaktion,” in 50 Jahre Universitäts-Informatik in München, A. Bode, M. Broy, H. Bungartz, and F. Matthes, Eds., Berlin, Heidelberg: Springer Berlin Heidelberg, 2017, p. 11–31. doi:10.1007/978-3-662-54712-0_2
[BibTeX] [Abstract] [Download PDF]
Computers pervade our everyday lives. They are embedded into our environment to such an extent that we no longer perceive them as computers. This creates the need to design immediately understandable user interfaces, both for individual users and for groups of users. Human-computer interaction is the subfield of computer science concerned with this challenge. This chapter first gives a brief introduction to HCI research methodology and then provides an insight into the research activities on this topic at the Munich universities. The focus is on work on public displays, gaze interaction in public space, and the development of authentication schemes that are both secure and usable.
@InBook{koch2017informatikmuenchen,
pages = {11--31},
title = {Allgegenw{\"a}rtige Mensch-Computer-Interaktion},
publisher = {Springer Berlin Heidelberg},
year = {2017},
author = {Koch, Michael and Alt, Florian},
editor = {Bode, Arndt and Broy, Manfred and Bungartz, Hans-Joachim and Matthes, Florian},
address = {Berlin, Heidelberg},
isbn = {978-3-662-54712-0},
note = {koch2017informatikmuenchen},
abstract = {Computer durchdringen unseren Alltag. Dabei sind diese derart in unsere Umgebung eingebettet, dass diese von uns nicht mehr als solche wahrgenommen werden. Hierdurch entsteht die Notwendigkeit zur Schaffung unmittelbar verständlicher Benutzerschnittstellen – sowohl für Individuen als auch für Gruppen von Benutzern. Mit diesem Teilbereich der Informatik beschäftigt sich die Mensch‐Computer‐Interaktion. Dieser Beitrag bietet zunächst eine kurze Einführung in die Forschungsmethodik der MCI und gibt einen Einblick in die Forschungsaktivitäten zu diesem Thema an den Münchner Universitäten. Im Fokus stehen hierbei Arbeiten zu öffentlichen Bildschirmen, Blickinteraktion im öffentlichen Raum, sowie die Entwicklung sicherer und gleichzeitig benutzbarer Authentifizierungsverfahren.},
booktitle = {50 Jahre Universit{\"a}ts-Informatik in M{\"u}nchen},
doi = {10.1007/978-3-662-54712-0_2},
timestamp = {2017.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/koch2017informatikmuenchen.pdf},
}
M. Braun, N. Broy, B. Pfleging, and F. Alt, “A design space for conversational in-vehicle information systems,” in Proceedings of the 19th international conference on human-computer interaction with mobile devices and services, New York, NY, USA, 2017, p. 79:1–79:8. doi:10.1145/3098279.3122122
[BibTeX] [Abstract] [Download PDF]
In this paper we chart a design space for conversational in-vehicle information systems (IVIS). Our work is motivated by the proliferation of speech interfaces in our everyday life, which have already found their way into consumer electronics and will most likely become pervasive in future cars. Our design space is based on expert interviews as well as a comprehensive literature review. We present five core dimensions – assistant, position, dialog design, system capabilities, and driver state – and show in an initial study how these dimensions affect the design of a prototypical IVIS. Design spaces have paved the way for much of the work done in HCI including but not limited to areas such as input and pointing devices, smart phones, displays, and automotive UIs. In a similar way, we expect our design space to aid practitioners in designing future IVIS but also researchers as they explore this young area of research.
@InProceedings{braun2017mobilehciadj,
author = {Braun, Michael and Broy, Nora and Pfleging, Bastian and Alt, Florian},
title = {A Design Space for Conversational In-vehicle Information Systems},
booktitle = {Proceedings of the 19th International Conference on Human-Computer Interaction with Mobile Devices and Services},
year = {2017},
series = {MobileHCI '17},
pages = {79:1--79:8},
address = {New York, NY, USA},
publisher = {ACM},
note = {braun2017mobilehciadj},
abstract = {In this paper we chart a design space for conversational in-vehicle information systems (IVIS). Our work is motivated by the proliferation of speech interfaces in our everyday life, which have already found their way into consumer electronics and will most likely become pervasive in future cars. Our design space is based on expert interviews as well as a comprehensive literature review. We present five core dimensions - assistant, position, dialog design, system capabilities, and driver state - and show in an initial study how these dimensions affect the design of a prototypical IVIS. Design spaces have paved the way for much of the work done in HCI including but not limited to areas such as input and pointing devices, smart phones, displays, and automotive UIs. In a similar way, we expect our design space to aid practitioners in designing future IVIS but also researchers as they explore this young area of research.},
acmid = {3122122},
articleno = {79},
doi = {10.1145/3098279.3122122},
isbn = {978-1-4503-5075-4},
keywords = {automotive user interfaces, design space, natural language interfaces, speech interaction},
location = {Vienna, Austria},
numpages = {8},
timestamp = {2017.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/braun2017mobilehciadj.pdf},
}
A. Colley, J. Häkkilä, B. Pfleging, and F. Alt, “A design space for external displays on cars,” in Proceedings of the 9th international conference on automotive user interfaces and interactive vehicular applications adjunct, New York, NY, USA, 2017, p. 146–151. doi:10.1145/3131726.3131760
[BibTeX] [Abstract] [Download PDF]
The exterior surfaces of cars provide so far unutilized opportunities for information display. The exploitation of this space is enabled by current advances in display technologies combined with increased sensor integration, computing power, and connectivity in vehicles. With this motivation, we present a framework, mapping the design space for external vehicle displays. The audience for the displayed information may be other road users, pedestrians, or autonomous systems. This design direction is particularly interesting in the future, as the current direction towards driverless vehicles may be an enabler for increased separation, redesign, and repurposing of vehicle interior and exterior surfaces.
@InProceedings{colley2017autoui,
author = {Colley, Ashley and H\"{a}kkil\"{a}, Jonna and Pfleging, Bastian and Alt, Florian},
title = {A Design Space for External Displays on Cars},
booktitle = {Proceedings of the 9th International Conference on Automotive User Interfaces and Interactive Vehicular Applications Adjunct},
year = {2017},
series = {AutomotiveUI '17},
pages = {146--151},
address = {New York, NY, USA},
publisher = {ACM},
note = {colley2017autoui},
abstract = {The exterior surfaces of cars provide so far unutilized opportunities for information display. The exploitation of this space is enabled by current advances in display technologies combined with increased sensor integration, computing power, and connectivity in vehicles. With this motivation, we present a framework, mapping the design space for external vehicle displays. The audience for the displayed information may be other road users, pedestrians, or autonomous systems. This design direction is particularly interesting in the future, as the current direction towards driverless vehicles may be an enabler for increased separation, redesign, and repurposing of vehicle interior and exterior surfaces.},
acmid = {3131760},
doi = {10.1145/3131726.3131760},
isbn = {978-1-4503-5151-5},
keywords = {Automotive UI, cars, design space, interactive surfaces, public displays},
location = {Oldenburg, Germany},
numpages = {6},
timestamp = {2017.09.22},
url = {http://www.florian-alt.org/unibw/wp-content/publications/colley2017autoui.pdf},
}
L. Trotter, C. Mai, and F. Alt, “Carsketch: a collaborative sketching table with self-propelled tangible objects for automotive applications,” in Proceedings of the 9th international conference on automotive user interfaces and interactive vehicular applications adjunct, New York, NY, USA, 2017, p. 126–130. doi:10.1145/3131726.3131749
[BibTeX] [Abstract] [Download PDF]
We present CarSketch, a concept and prototype of a collaborative sketching table that supports interdisciplinary development teams during the early development phase of driver assistance systems. Due to the high costs caused by the use of physical prototypes, simulation is a common approach. Yet, the operation of state-of-the-art simulations is restricted to specialists, leaving the majority of stakeholders as passive observers. Our collaborative, multi-perspective communication tool enables all participants to interact with the simulation. In particular, it (1) structures ideation and development by providing a distraction-free environment with an easy-to-use drawing interface, (2) uses self-propelled tangibles to monitor and influence the simulation, (3) provides additional information through personal augmentation, and (4) allows the simulation to be replayed in an immersive 3D environment. We expect the tool to be useful for multidisciplinary teams in fostering the ideation phase and finding conceptual mistakes more efficiently.
@InProceedings{trotter2017autouiadj,
author = {Trotter, Ludwig and Mai, Christian and Alt, Florian},
title = {CarSketch: A Collaborative Sketching Table with Self-Propelled Tangible Objects for Automotive Applications},
booktitle = {Proceedings of the 9th International Conference on Automotive User Interfaces and Interactive Vehicular Applications Adjunct},
year = {2017},
series = {AutomotiveUI '17},
pages = {126--130},
address = {New York, NY, USA},
publisher = {ACM},
note = {trotter2017autouiadj},
abstract = {We present CarSketch, a concept and prototype of a collaborative sketching table that supports interdisciplinary development teams during the early development phase of driver assistance systems. Due to the high costs caused by the use of physical prototypes, simulation is a common approach. Yet, the operation of state-of-the-art simulations is restricted to specialists, leaving the majority of stakeholders as passive observers. Our collaborative, multi-perspective communication tool enables all participants to interact with the simulation. In particular, it (1) structures ideation and development by providing a distraction-free environment with an easy-to-use drawing interface, (2) uses self-propelled tangibles to monitor and influence the simulation, (3) provides additional information through personal augmentation, and (4) allows the simulation to be replayed in an immersive 3D environment. We expect the tool to be useful for multidisciplinary teams in fostering the ideation phase and finding conceptual mistakes more efficiently.},
acmid = {3131749},
doi = {10.1145/3131726.3131749},
isbn = {978-1-4503-5151-5},
keywords = {Automotive, collaborative work, simulation},
location = {Oldenburg, Germany},
numpages = {5},
timestamp = {2017.09.22},
url = {http://www.florian-alt.org/unibw/wp-content/publications/trotter2017autouiadj.pdf},
}
D. Buschek, M. Hackenschmied, and F. Alt, “Dynamic UI adaptations for one-handed use of large mobile touchscreen devices,” in Proceedings of the IFIP conference on human-computer interaction, 2017, p. 184–201.
[BibTeX] [Abstract] [Download PDF]
We present and evaluate dynamic adaptations for mobile touch GUIs. They mitigate reachability problems that users face when operating large smartphones or “phablets” with a single hand. In particular, we enhance common touch GUI elements with three simple animated location and orientation changes (Roll, Bend, Move). Users can trigger them to move GUI elements within comfortable reach. A lab study (N=35) with two devices (4.95 in, 5.9 in) shows that these adaptations improve reachability on the larger device. They also reduce device movements required to reach the targets. Participants perceived adapted UIs as faster, less exhausting and more comfortable to use than the baselines. Feedback and video analyses also indicate that participants retained a safer grip on the device through our adaptations. We conclude with design implications for (adaptive) touch GUIs on large devices.
@InProceedings{buschek2017interact,
author = {Buschek, Daniel and Hackenschmied, Maximilian and Alt, Florian},
title = {Dynamic UI Adaptations for One-Handed Use of Large Mobile Touchscreen Devices},
booktitle = {Proceedings of the IFIP Conference on Human-Computer Interaction},
year = {2017},
series = {INTERACT'17},
pages = {184--201},
organization = {Springer},
note = {buschek2017interact},
abstract = {We present and evaluate dynamic adaptations for mobile touch GUIs. They mitigate reachability problems that users face when operating large smartphones or "phablets" with a single hand. In particular, we enhance common touch GUI elements with three simple animated location and orientation changes (Roll, Bend, Move). Users can trigger them to move GUI elements within comfortable reach. A lab study (N=35) with two devices (4.95 in, 5.9 in) shows that these adaptations improve reachability on the larger device. They also reduce device movements required to reach the targets. Participants perceived adapted UIs as faster, less exhausting and more comfortable to use than the baselines. Feedback and video analyses also indicate that participants retained a safer grip on the device through our adaptations. We conclude with design implications for (adaptive) touch GUIs on large devices.},
timestamp = {2017.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2017interact.pdf},
}
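As a rough illustration of one such adaptation, the sketch below implements a "Move"-style translation that pulls an out-of-reach GUI element onto a circle of comfortable thumb reach; the anchor point and reach radius are invented parameters, not values from the study:

import math
from dataclasses import dataclass

# Illustrative sketch of a "Move" adaptation for one-handed use:
# translate a GUI element towards the thumb's comfortable-reach region.
@dataclass
class Point:
    x: float
    y: float

THUMB_ANCHOR = Point(1000.0, 1800.0)  # assumed bottom-right grip position, in px
REACH_RADIUS = 700.0                  # assumed comfortable reach, in px

def move_into_reach(target: Point) -> Point:
    dx, dy = target.x - THUMB_ANCHOR.x, target.y - THUMB_ANCHOR.y
    dist = math.hypot(dx, dy)
    if dist <= REACH_RADIUS:
        return target  # already reachable: no adaptation needed
    scale = REACH_RADIUS / dist  # pull the element onto the reach circle
    return Point(THUMB_ANCHOR.x + dx * scale, THUMB_ANCHOR.y + dy * scale)

print(move_into_reach(Point(80.0, 200.0)))  # a top-left target is moved closer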
E. Lösch, F. Alt, and M. Koch, “Mirror, mirror on the wall: attracting passers-by to public touch displays with user representations,” in Proceedings of the 2017 ACM international conference on interactive surfaces and spaces, New York, NY, USA, 2017, p. 22–31. doi:10.1145/3132272.3134129
[BibTeX] [Abstract] [Download PDF]
In this paper, we investigate how effectively users’ representations convey interactivity and foster interaction on large information touch displays. This research is motivated by the fact that user representations have been shown to be very efficient in playful applications that support mid-air interaction. At the same time, little is known about the effects of applying this approach to settings with a different primary mode of interaction, e.g. touch. It is also unclear how the playfulness of user representations influences the interest of users in the displayed information. To close this gap, we combine a touch display with screens showing life-sized video representations of passers-by. In a deployment, we compare different spatial arrangements to understand how passers-by are attracted and enticed to interact, how they explore the application, and how they socially behave. Findings reveal that (a) opposing displays foster interaction, but (b) may also reduce interaction at the main display; (c) a large intersection between focus and nimbus helps to notice interactivity; (d) using playful elements at information displays is not counterproductive; (e) mixed interaction modalities are hard to understand.
@InProceedings{loesch2017iss,
author = {L\"{o}sch, Eva and Alt, Florian and Koch, Michael},
title = {Mirror, Mirror on the Wall: Attracting Passers-by to Public Touch Displays With User Representations},
booktitle = {Proceedings of the 2017 ACM International Conference on Interactive Surfaces and Spaces},
year = {2017},
series = {ISS '17},
pages = {22--31},
address = {New York, NY, USA},
publisher = {ACM},
note = {loesch2017iss},
abstract = {In this paper, we investigate how effectively users' representations convey interactivity and foster interaction on large information touch displays. This research is motivated by the fact that user representations have been shown to be very efficient in playful applications that support mid-air interaction. At the same time, little is known about the effects of applying this approach to settings with a different primary mode of interaction, e.g. touch. It is also unclear how the playfulness of user representations influences the interest of users in the displayed information. To close this gap, we combine a touch display with screens showing life-sized video representations of passers-by. In a deployment, we compare different spatial arrangements to understand how passers-by are attracted and enticed to interact, how they explore the application, and how they socially behave. Findings reveal that (a) opposing displays foster interaction, but (b) may also reduce interaction at the main display; (c) a large intersection between focus and nimbus helps to notice interactivity; (d) using playful elements at information displays is not counterproductive; (e) mixed interaction modalities are hard to understand.},
acmid = {3134129},
doi = {10.1145/3132272.3134129},
isbn = {978-1-4503-4691-7},
keywords = {Public Displays, Touch Interaction, User Representations},
location = {Brighton, United Kingdom},
numpages = {10},
timestamp = {2017.10.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/loesch2017iss.pdf},
}
F. Alt and L. Ziegler, “PD-Survey – Supporting Audience-Centric Research through Surveys on Public Display Networks,” in Proceedings of the 25th international ACM conference on multimedia, New York, NY, USA, 2017.
[BibTeX] [Abstract] [Download PDF]
We present PD-Survey, a platform to conduct surveys across a network of interactive screens. Our research is motivated by the fact that obtaining and analyzing data about users of public displays requires significant effort; e.g., running long-term observations or post-hoc analyses of video/interaction logs. As a result, research is often constrained to a single installation within a particular context, neither accounting for a diverse audience (children, shoppers, commuters) nor for different situations (waiting vs. passing by) or times of the day. As displays become networked, one way to address this challenge is through surveys on displays, where audience feedback is collected in situ. Since current tools do not appropriately address the requirements of a display network, we implemented a tool for use on public displays and report on its design and development. Our research is complemented by two in-the-wild deployments that (a) investigate different channels for feedback collection, (b) showcase how the work of researchers is supported, and (c) testify that the platform can easily be extended with novel features.
@InProceedings{alt2017mm,
author = {Alt, Florian AND Ziegler, Lukas},
title = {{PD-Survey - Supporting Audience-Centric Research through Surveys on Public Display Networks}},
booktitle = {Proceedings of the 25th International ACM Conference on Multimedia},
year = {2017},
series = {MM'17},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2017mm},
abstract = {We present PD-Survey, a platform to conduct surveys across a network of interactive screens. Our research is motivated by the fact that obtaining and analyzing data about users of public displays requires significant effort; e.g., running long-term observations or post-hoc analyses of video/interaction logs. As a result, research is often constrained to a single installation within a particular context, neither accounting for a diverse audience (children, shoppers, commuters) nor for different situations (waiting vs. passing by) or times of the day. As displays become networked, one way to address this challenge is through surveys on displays, where audience feedback is collected in situ. Since current tools do not appropriately address the requirements of a display network, we implemented a tool for use on public displays and report on its design and development. Our research is complemented by two in-the-wild deployments that (a) investigate different channels for feedback collection, (b) showcase how the work of researchers is supported, and (c) testify that the platform can easily be extended with novel features.},
keywords = {public displays, surveys},
location = {Mountain View, CA, USA},
numpages = {9},
timestamp = {2017.10.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2017mm.pdf},
}
M. Khamis, M. Hassib, E. von Zezschwitz, A. Bulling, and F. Alt, “GazeTouchPIN: protecting sensitive data on mobile devices using secure multimodal authentication,” in Proceedings of the 19th ACM international conference on multimodal interaction, New York, NY, USA, 2017. doi:10.1145/3136755.3136809
[BibTeX] [Abstract] [Download PDF]
Although mobile devices provide access to a plethora of sensitive data, most users still only protect them with PINs or patterns, which are vulnerable to side-channel attacks (e.g., shoulder surfing). However, prior research has shown that privacy-aware users are willing to take further steps to protect their private data. We propose GazeTouchPIN, a novel secure authentication scheme for mobile devices that combines gaze and touch input. Our multimodal approach complicates shoulder-surfing attacks by requiring attackers to observe the screen as well as the user’s eyes to find the password. We evaluate the security and usability of GazeTouchPIN in two user studies (N=30). We found that while GazeTouchPIN requires longer entry times, privacy-aware users would use it on demand when feeling observed or when accessing sensitive data. The results show that the success rate of shoulder-surfing attacks drops from 68% to 10.4% when using GazeTouchPIN.
@InProceedings{khamis2017icmi,
author = {Khamis, Mohamed and Hassib, Mariam and von Zezschwitz, Emanuel and Bulling, Andreas and Alt, Florian},
title = {GazeTouchPIN: Protecting Sensitive Data on Mobile Devices using Secure Multimodal Authentication},
booktitle = {Proceedings of the 19th ACM International Conference on Multimodal Interaction},
year = {2017},
series = {ICMI 2017},
address = {New York, NY, USA},
publisher = {ACM},
note = {khamis2017icmi},
abstract = {Although mobile devices provide access to a plethora of sensitive data, most users still only protect them with PINs or patterns, which are vulnerable to side-channel attacks (e.g., shoulder surfing). However, prior research has shown that privacy-aware users are willing to take further steps to protect their private data. We propose GazeTouchPIN, a novel secure authentication scheme for mobile devices that combines gaze and touch input. Our multimodal approach complicates shoulder-surfing attacks by requiring attackers to observe the screen as well as the user's eyes to find the password. We evaluate the security and usability of GazeTouchPIN in two user studies (N=30). We found that while GazeTouchPIN requires longer entry times, privacy-aware users would use it on demand when feeling observed or when accessing sensitive data. The results show that the success rate of shoulder-surfing attacks drops from 68% to 10.4% when using GazeTouchPIN.},
acmid = {3136809},
doi = {10.1145/3136755.3136809},
isbn = {978-1-4503-5543-8/17/11},
location = {Glasgow, Scotland},
numpages = {5},
timestamp = {2017.10.24},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2017icmi.pdf},
}
M. Hassib, M. Khamis, S. Friedl, S. Schneegass, and F. Alt, “BrainAtWork: Logging Cognitive Engagement and Tasks in the Workplace Using Electroencephalography,” in Proceedings of the 16th international conference on mobile and ubiquitous multimedia, New York, NY, USA, 2017, p. 305–310. doi:10.1145/3152832.3152865
[BibTeX] [Abstract] [Download PDF]
Today’s workplaces are dynamic and complex. Digital data sources such as email and video conferencing aim to support workers but also add to their burden of multitasking. Psychophysiological sensors such as Electroencephalography (EEG) can provide users with cues about their cognitive state. We introduce BrainAtWork, a workplace engagement and task logger which shows users their cognitive state while working on different tasks. In a lab study with eleven participants working on their own real-world tasks, we gathered 16 hours of EEG and PC logs which were labeled into three classes: central, peripheral and meta work. We evaluated the usability of BrainAtWork via questionnaires and interviews. We investigated the correlations between measured cognitive engagement from EEG and subjective responses from experience sampling probes. Using random forest classification, we show the feasibility of automatically labeling work tasks into work classes. We discuss how BrainAtWork can support workers in the long term by encouraging reflection and helping with task scheduling.
@InProceedings{hassib2017mum,
author = {Hassib, Mariam and Khamis, Mohamed and Friedl, Susanne and Schneegass, Stefan and Alt, Florian},
title = {{BrainAtWork: Logging Cognitive Engagement and Tasks in the Workplace Using Electroencephalography}},
booktitle = {Proceedings of the 16th International Conference on Mobile and Ubiquitous Multimedia},
year = {2017},
series = {MUM '17},
pages = {305--310},
address = {New York, NY, USA},
publisher = {ACM},
note = {hassib2017mum},
abstract = {Today's workplaces are dynamic and complex. Digital data sources such as email and video conferencing aim to support workers but also add to their burden of multitasking. Psychophysiological sensors such as Electroencephalography (EEG) can provide users with cues about their cognitive state. We introduce BrainAtWork, a workplace engagement and task logger which shows users their cognitive state while working on different tasks. In a lab study with eleven participants working on their own real-world tasks, we gathered 16 hours of EEG and PC logs which were labeled into three classes: central, peripheral and meta work. We evaluated the usability of BrainAtWork via questionnaires and interviews. We investigated the correlations between measured cognitive engagement from EEG and subjective responses from experience sampling probes. Using random forest classification, we show the feasibility of automatically labeling work tasks into work classes. We discuss how BrainAtWork can support workers in the long term by encouraging reflection and helping with task scheduling.},
acmid = {3152865},
doi = {10.1145/3152832.3152865},
isbn = {978-1-4503-5378-6},
keywords = {EEG, multitasking, workplace logging},
location = {Stuttgart, Germany},
numpages = {6},
timestamp = {2017.11.26},
url = {http://www.florian-alt.org/unibw/wp-content/publications/hassib2017mum.pdf},
}
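The classification step reported above (labeling work episodes as central, peripheral, or meta work with random forests) can be sketched with scikit-learn as follows; the feature layout and the synthetic data are assumptions for illustration, with real EEG band-power features and experience-sampling labels taking their place in practice:

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

# Illustrative sketch of a random-forest task classifier. With random
# data the accuracy naturally sits at chance level (~0.33); real EEG
# features are needed for meaningful results.
rng = np.random.default_rng(0)
X = rng.normal(size=(600, 8))     # e.g., band power per channel (assumed layout)
y = rng.integers(0, 3, size=600)  # 0=central, 1=peripheral, 2=meta work

clf = RandomForestClassifier(n_estimators=200, random_state=0)
scores = cross_val_score(clf, X, y, cv=5)
print(f"mean CV accuracy: {scores.mean():.2f}")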
M. Khamis, L. Bandelow, S. Schick, D. Casadevall, A. Bulling, and F. Alt, “They are all after you: investigating the viability of a threat model that involves multiple shoulder surfers,” in Proceedings of the 16th international conference on mobile and ubiquitous multimedia, New York, NY, USA, 2017, p. 31–35. doi:10.1145/3152832.3152851
[BibTeX] [Abstract] [Download PDF]
Many of the authentication schemes recently proposed for mobile devices complicate shoulder surfing by splitting the attacker’s attention across two or more entities. For example, multimodal authentication schemes such as GazeTouchPIN and GazeTouchPass require attackers to observe the user’s gaze input as well as the touch input performed on the phone’s screen. These schemes have so far only been evaluated against single observers, although multiple observers could potentially attack them with greater ease, since each observer can focus exclusively on one part of the password. In this work, we study the effectiveness of a novel threat model against authentication schemes that split the attacker’s attention. As a case study, we report on a security evaluation of two state-of-the-art authentication schemes attacked by a team of two observers. Our results show that although multiple observers perform better against these schemes than single observers, multimodal schemes are significantly more secure against multiple observers than schemes that employ a single modality. We discuss how this threat model impacts the design of authentication schemes.
@InProceedings{khamis2017mum,
author = {Khamis, Mohamed and Bandelow, Linda and Schick, Stina and Casadevall, Dario and Bulling, Andreas and Alt, Florian},
title = {They Are All After You: Investigating the Viability of a Threat Model That Involves Multiple Shoulder Surfers},
booktitle = {Proceedings of the 16th International Conference on Mobile and Ubiquitous Multimedia},
year = {2017},
series = {MUM '17},
pages = {31--35},
address = {New York, NY, USA},
publisher = {ACM},
note = {khamis2017mum},
abstract = {Many of the authentication schemes recently proposed for mobile devices complicate shoulder surfing by splitting the attacker's attention across two or more entities. For example, multimodal authentication schemes such as GazeTouchPIN and GazeTouchPass require attackers to observe the user's gaze input as well as the touch input performed on the phone's screen. These schemes have so far only been evaluated against single observers, although multiple observers could potentially attack them with greater ease, since each observer can focus exclusively on one part of the password. In this work, we study the effectiveness of a novel threat model against authentication schemes that split the attacker's attention. As a case study, we report on a security evaluation of two state-of-the-art authentication schemes attacked by a team of two observers. Our results show that although multiple observers perform better against these schemes than single observers, multimodal schemes are significantly more secure against multiple observers than schemes that employ a single modality. We discuss how this threat model impacts the design of authentication schemes.},
acmid = {3152851},
doi = {10.1145/3152832.3152851},
isbn = {978-1-4503-5378-6},
keywords = {gaze gestures, multimodal authentication, multiple observers, privacy, shoulder surfing, threat model},
location = {Stuttgart, Germany},
numpages = {5},
timestamp = {2017.11.26},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2017mum.pdf},
}
S. Prange, V. Müller, D. Buschek, and F. Alt, “QuakeQuiz – a case study on deploying a playful display application in a museum context,” in Proceedings of the 16th international conference on mobile and ubiquitous multimedia, New York, NY, USA, 2017. doi:10.1145/3152832.3152841
[BibTeX] [Abstract] [Download PDF]
In this paper, we present a case study in which we designed and implemented an interactive museum exhibit. In particular, we extended a section of the museum with an interactive quiz game. The project is an example of an opportunistic deployment where the needs of different stakeholders (museum administration, visitors, researchers) and the properties of the space needed to be considered. It is also an example of how we can apply knowledge on methodology and audience behavior collected by the research community over the past years. The focus of this paper is on (1) the design and concept phase that led to the initial idea for the exhibit, (2) the implementation phase, (3) a roll-out and early-insights phase where we tested and refined the application in an iterative design process on-site, and (4) the final deployment as a permanent exhibit of the museum. We hope our report will be useful for researchers and practitioners designing systems for similar contexts.
@InProceedings{prange2017mum,
author = {Sarah Prange and Victoria M\"uller and Daniel Buschek and Florian Alt},
title = {QuakeQuiz - A Case Study on Deploying a Playful Display Application in a Museum Context},
booktitle = {Proceedings of the 16th International Conference on Mobile and Ubiquitous Multimedia},
year = {2017},
series = {MUM '17},
address = {New York, NY, USA},
publisher = {ACM},
note = {prange2017mum},
abstract = {In this paper, we present a case study in which we designed and implemented an interactive museum exhibit. In particular, we extended a section of the museum with an interactive quiz game. The project is an example of an opportunistic deployment where the needs of different stakeholders (museum administration, visitors, researchers) and the properties of the space needed to be considered. It is also an example of how we can apply knowledge on methodology and audience behavior collected by the research community over the past years. The focus of this paper is on (1) the design and concept phase that led to the initial idea for the exhibit, (2) the implementation phase, (3) a roll-out and early-insights phase where we tested and refined the application in an iterative design process on-site, and (4) the final deployment as a permanent exhibit of the museum. We hope our report will be useful for researchers and practitioners designing systems for similar contexts.},
doi = {10.1145/3152832.3152841},
location = {Stuttgart, Germany},
timestamp = {2017.11.26},
url = {http://www.florian-alt.org/unibw/wp-content/publications/prange2017mum.pdf},
}
D. Buschek, J. Kinshofer, and F. Alt, “A Comparative Evaluation of Spatial Targeting Behaviour Patterns for Finger and Stylus Tapping on Mobile Touchscreen Devices,” Proc. ACM Interact. Mob. Wearable Ubiquitous Technol., vol. 1, iss. 4, p. 126:1–126:21, 2017. doi:10.1145/3161160
[BibTeX] [Abstract] [Download PDF]
Models of 2D targeting error patterns have been applied as a valuable computational tool for analysing finger touch behaviour on mobile devices, improving touch accuracy and inferring context. However, their use in stylus input is yet unexplored. This paper presents the first empirical study and analyses of such models for tapping with a stylus. In a user study (N = 28), we collected targeting data on a smartphone, both for stationary use (sitting) and walking. We compare targeting patterns between index finger input and three stylus variations – two stylus widths and nib types as well as the addition of a hover cursor. Our analyses reveal that stylus targeting patterns are user-specific, and that offset models improve stylus tapping accuracy, but less so than for finger touch. Input method has a stronger influence on targeting patterns than mobility, and stylus width is more influential than the hover cursor. Stylus models improve finger accuracy as well, but not vice versa. The extent of the stylus accuracy advantage compared to the finger depends on screen location and mobility. We also discuss patterns related to mobility and gliding of the stylus on the screen. We conclude with implications for target sizes and offset model applications.
@Article{buschek2017imwut,
author = {Buschek, Daniel and Kinshofer, Julia and Alt, Florian},
title = {{A Comparative Evaluation of Spatial Targeting Behaviour Patterns for Finger and Stylus Tapping on Mobile Touchscreen Devices}},
year = {2017},
volume = {1},
number = {4},
pages = {126:1--126:21},
address = {New York, NY, USA},
month = jan,
publisher = {ACM},
note = {buschek2017imwut},
abstract = {Models of 2D targeting error patterns have been applied as a valuable computational tool for analysing finger touch behaviour on mobile devices, improving touch accuracy and inferring context. However, their use in stylus input is yet unexplored. This paper presents the first empirical study and analyses of such models for tapping with a stylus. In a user study (N = 28), we collected targeting data on a smartphone, both for stationary use (sitting) and walking. We compare targeting patterns between index finger input and three stylus variations -- two stylus widths and nib types as well as the addition of a hover cursor. Our analyses reveal that stylus targeting patterns are user-specific, and that offset models improve stylus tapping accuracy, but less so than for finger touch. Input method has a stronger influence on targeting patterns than mobility, and stylus width is more influential than the hover cursor. Stylus models improve finger accuracy as well, but not vice versa. The extent of the stylus accuracy advantage compared to the finger depends on screen location and mobility. We also discuss patterns related to mobility and gliding of the stylus on the screen. We conclude with implications for target sizes and offset model applications.},
acmid = {3161160},
articleno = {126},
doi = {10.1145/3161160},
issn = {2474-9567},
issue_date = {December 2017},
journal = {Proc. ACM Interact. Mob. Wearable Ubiquitous Technol.},
keywords = {Gaussian Process regression, Stylus input, computational interaction, offset model},
numpages = {21},
timestamp = {2017.10.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2017imwut.pdf},
}
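For readers unfamiliar with touch-offset models, the sketch below illustrates the general idea with Gaussian Process regression (named in the entry's keywords): learn a user's systematic 2D offset between sensed touch locations and intended targets, then correct new touches. The synthetic training data and the kernel choice are assumptions, not the paper's setup:

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

# Illustrative 2D touch-offset model: predict the (dx, dy) offset for a
# sensed touch and add it back to estimate the intended target.
rng = np.random.default_rng(1)
touches = rng.uniform(0, 1, size=(200, 2))  # sensed (x, y), normalised screen coords
offsets = 0.02 * np.sin(3 * touches) + rng.normal(0, 0.005, size=(200, 2))  # synthetic pattern

gp = GaussianProcessRegressor(kernel=RBF(length_scale=0.3) + WhiteKernel(noise_level=1e-4))
gp.fit(touches, offsets)  # multi-output regression over both offset dimensions

new_touch = np.array([[0.4, 0.7]])
corrected = new_touch + gp.predict(new_touch)  # apply the predicted offset
print(corrected)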

2016

F. Alt, A. Bulling, L. Mecke, and D. Buschek, “Attention, please!: comparing features for measuring audience attention towards pervasive displays,” in Proceedings of the 2016 ACM conference on designing interactive systems, New York, NY, USA, 2016, p. 823–828. doi:10.1145/2901790.2901897
[BibTeX] [Abstract] [Download PDF]
Measuring audience attention towards pervasive displays is important but accurate measurement in real time remains a significant sensing challenge. Consequently, researchers and practitioners typically use other features, such as face presence, as a proxy. We provide a principled comparison of the performance of six features and their combinations for measuring attention: face presence, movement trajectory, walking speed, shoulder orientation, head pose, and gaze direction. We implemented a prototype that is capable of capturing this rich set of features from video and depth camera data. Using a controlled lab experiment (N=18) we show that as a single feature, face presence is indeed among the most accurate. We further show that accuracy can be increased through a combination of features (+10.3%), knowledge about the audience (+63.8%), as well as user identities (+69.0%). Our findings are valuable for display providers who want to collect data on display effectiveness or build interactive, responsive apps.
@InProceedings{alt2016dis,
author = {Alt, Florian and Bulling, Andreas and Mecke, Lukas and Buschek, Daniel},
title = {Attention, Please!: Comparing Features for Measuring Audience Attention Towards Pervasive Displays},
booktitle = {Proceedings of the 2016 ACM Conference on Designing Interactive Systems},
year = {2016},
series = {DIS '16},
pages = {823--828},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2016dis},
abstract = {Measuring audience attention towards pervasive displays is important but accurate measurement in real time remains a significant sensing challenge. Consequently, researchers and practitioners typically use other features, such as face presence, as a proxy. We provide a principled comparison of the performance of six features and their combinations for measuring attention: face presence, movement trajectory, walking speed, shoulder orientation, head pose, and gaze direction. We implemented a prototype that is capable of capturing this rich set of features from video and depth camera data. Using a controlled lab experiment (N=18) we show that as a single feature, face presence is indeed among the most accurate. We further show that accuracy can be increased through a combination of features (+10.3%), knowledge about the audience (+63.8%), as well as user identities (+69.0%). Our findings are valuable for display providers who want to collect data on display effectiveness or build interactive, responsive apps.},
acmid = {2901897},
doi = {10.1145/2901790.2901897},
isbn = {978-1-4503-4031-1},
keywords = {audience funnel, interaction, phases, public displays, zones},
location = {Brisbane, QLD, Australia},
numpages = {6},
timestamp = {2016.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2016dis.pdf},
}
F. Alt, M. Mikusz, S. Schneegass, and A. Bulling, “Memorability of cued-recall graphical passwords with saliency masks,” in Proceedings of the 15th international conference on mobile and ubiquitous multimedia, New York, NY, USA, 2016, p. 191–200. doi:10.1145/3012709.3012730
[BibTeX] [Abstract] [Download PDF]
Cued-recall graphical passwords have a lot of potential for secure user authentication, particularly if combined with saliency masks to prevent users from selecting weak passwords. Saliency masks were shown to significantly improve password security by excluding those areas of the image that are most likely to lead to hotspots. In this paper we investigate the impact of such saliency masks on the memorability of cued-recall graphical passwords. We first conduct two pre-studies (N=52) to obtain a set of images with three different image complexities as well as real passwords. A month-long user study (N=26) revealed that there is a strong learning effect for graphical passwords, in particular if defined on images with a saliency mask. While for complex images, the learning curve is steeper than for less complex ones, they best supported memorability in the long term, most likely because they provided users more alternatives to select memorable password points. These results complement prior work on the security of such passwords and underline the potential of saliency masks as both a secure and usable improvement to cued-recall gaze-based graphical passwords.
@InProceedings{alt2016mum,
author = {Alt, Florian and Mikusz, Mateusz and Schneegass, Stefan and Bulling, Andreas},
title = {Memorability of Cued-recall Graphical Passwords with Saliency Masks},
booktitle = {Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia},
year = {2016},
series = {MUM '16},
pages = {191--200},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2016mum},
abstract = {Cued-recall graphical passwords have a lot of potential for secure user authentication, particularly if combined with saliency masks to prevent users from selecting weak passwords. Saliency masks were shown to significantly improve password security by excluding those areas of the image that are most likely to lead to hotspots. In this paper we investigate the impact of such saliency masks on the memorability of cued-recall graphical passwords. We first conduct two pre-studies (N=52) to obtain a set of images with three different image complexities as well as real passwords. A month-long user study (N=26) revealed that there is a strong learning effect for graphical passwords, in particular if defined on images with a saliency mask. While for complex images, the learning curve is steeper than for less complex ones, they best supported memorability in the long term, most likely because they provided users more alternatives to select memorable password points. These results complement prior work on the security of such passwords and underline the potential of saliency masks as both a secure and usable improvement to cued-recall gaze-based graphical passwords.},
acmid = {3012730},
doi = {10.1145/3012709.3012730},
isbn = {978-1-4503-4860-7},
keywords = {cued-recall graphical passwords, memorability, saliency masks, user authentication, user study},
location = {Rovaniemi, Finland},
numpages = {10},
timestamp = {2016.12.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2016mum.pdf},
}
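The saliency-mask mechanism described above can be sketched in a few lines: obtain a per-pixel saliency map for the password image and disallow password points in the most salient regions, which are the likely hotspots. The quantile threshold and the random stand-in for a real saliency model are assumptions:

import numpy as np

# Illustrative sketch of a saliency mask for cued-recall graphical
# passwords: block point selection in the most salient image regions.
rng = np.random.default_rng(2)
saliency = rng.random((480, 640))       # stand-in for a real saliency model's output

threshold = np.quantile(saliency, 0.8)  # mask the top 20% most salient pixels (assumed cutoff)
mask = saliency < threshold             # True where password points are allowed

def point_allowed(x: int, y: int) -> bool:
    """Accept a clicked password point only outside salient hotspots."""
    return bool(mask[y, x])

print(point_allowed(100, 200))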
F. Alt, S. Torma, and D. Buschek, “Don’t disturb me: understanding secondary tasks on public displays,” in Proceedings of the 5th ACM international symposium on pervasive displays, New York, NY, USA, 2016, p. 1–12. doi:10.1145/2914920.2915023
[BibTeX] [Abstract] [Download PDF]
A growing number of displays provide information and applications in public spaces. Most applications today are considered to pose one task to the user, such as navigating a map. In contrast to such primary tasks, secondary tasks have so far received little attention in research, despite their practical relevance. For example, a secondary task might occur by displaying special ticket offers to a tourist browsing a city map for attractions. This paper investigates secondary tasks with two key contributions: First, we describe a design space for secondary tasks on public displays, identifying dimensions of interest to application designers. Second, we present a user study with text entry and mental arithmetic tasks to assess how secondary tasks influence performance in the primary task depending on two main dimensions – difficulty and temporal integration. We report performance (completion times, error rates) and subjective user ratings, such as distraction and frustration. Analysis of gaze data suggests three main strategies of how users switch between primary and secondary tasks. Based on our findings, we conclude with recommendations for designing apps with secondary tasks on public displays.
@InProceedings{alt2016perdis1,
author = {Alt, Florian and Torma, Sarah and Buschek, Daniel},
title = {Don't Disturb Me: Understanding Secondary Tasks on Public Displays},
booktitle = {Proceedings of the 5th ACM International Symposium on Pervasive Displays},
year = {2016},
series = {PerDis '16},
pages = {1--12},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2016perdis1},
abstract = {A growing number of displays provide information and applications in public spaces. Most applications today are considered to pose one task to the user, such as navigating a map. In contrast to such primary tasks, secondary tasks have so far received little attention in research, despite their practical relevance. For example, a secondary task might occur by displaying special ticket offers to a tourist browsing a city map for attractions. This paper investigates secondary tasks with two key contributions: First, we describe a design space for secondary tasks on public displays, identifying dimensions of interest to application designers. Second, we present a user study with text entry and mental arithmetic tasks to assess how secondary tasks influence performance in the primary task depending on two main dimensions -- difficulty and temporal integration. We report performance (completion times, error rates) and subjective user ratings, such as distraction and frustration. Analysis of gaze data suggests three main strategies of how users switch between primary and secondary tasks. Based on our findings, we conclude with recommendations for designing apps with secondary tasks on public displays.},
acmid = {2915023},
doi = {10.1145/2914920.2915023},
isbn = {978-1-4503-4366-4},
keywords = {mental workload, parallel-task environment, public display, secondary task performance},
location = {Oulu, Finland},
numpages = {12},
timestamp = {2016.06.20},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2016perdis1.pdf},
}
F. Alt and J. Vehns, “Opportunistic Deployments: Challenges and Opportunities of Conducting Public Display Research at an Airport,” in Proceedings of the 5th ACM International Symposium on Pervasive Displays, New York, NY, USA, 2016, p. 106–117. doi:10.1145/2914920.2915020
[BibTeX] [Abstract] [Download PDF]
In this paper, we report on the design, development, and deployment of an interactive shopping display at a major European airport. The ability to manufacture displays in arbitrary sizes and form factors as well as their networking capabilities allow public displays to be deployed in almost any location and target a huge variety of audiences. At the same time, this makes it difficult for researchers to gather generalizable insights on audience behavior. Rather, findings are often very specific to a particular deployment. We argue that in order to develop a comprehensive understanding of how successful interactive display installations can be created, researchers need to explore as large a variety of situations as possible. We contribute to this understanding by providing insights from a deployment in a security-critical environment involving multiple stakeholders, where the audience is encountered in different situations (waiting, passing by). Our insights are valuable for both researchers and practitioners operating interactive display deployments.
@InProceedings{alt2016perdis2,
author = {Alt, Florian and Vehns, Julia},
title = {Opportunistic Deployments: Challenges and Opportunities of Conducting Public Display Research at an Airport},
booktitle = {Proceedings of the 5th ACM International Symposium on Pervasive Displays},
year = {2016},
series = {PerDis '16},
pages = {106--117},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2016perdis2},
abstract = {In this paper, we report on the design, development, and deployment of an interactive shopping display at a major European airport. The ability to manufacture displays in arbitrary sizes and form factors as well as their networking capabilities allow public displays to be deployed in almost any location and target a huge variety of audiences. At the same time, this makes it difficult for researchers to gather generalizable insights on audience behavior. Rather, findings are often very specific to a particular deployment. We argue that in order to develop a comprehensive understanding of how successful interactive display installations can be created, researchers need to explore as large a variety of situations as possible. We contribute to this understanding by providing insights from a deployment in a security-critical environment involving multiple stakeholders, where the audience is encountered in different situations (waiting, passing by). Our insights are valuable for both researchers and practitioners operating interactive display deployments.},
acmid = {2915020},
doi = {10.1145/2914920.2915020},
isbn = {978-1-4503-4366-4},
keywords = {audience behavior, deployment-based research, interaction, public displays, shopping},
location = {Oulu, Finland},
numpages = {12},
timestamp = {2016.06.20},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2016perdis2.pdf},
}
M. Baldauf, F. Adegeye, F. Alt, and J. Harms, “Your Browser is the Controller: Advanced Web-based Smartphone Remote Controls for Public Screens,” in Proceedings of the 5th ACM International Symposium on Pervasive Displays, New York, NY, USA, 2016, p. 175–181. doi:10.1145/2914920.2915026
[BibTeX] [Abstract] [Download PDF]
In recent years, much research has focused on using smartphones as input devices for distant screens, in many cases by means of native applications. At the same time, prior work often ignored the downsides of native applications for practical usage, such as the need for a download and the required installation process. This hampers the spontaneous use of an interactive service. To address the aforementioned drawbacks, we introduce ATREUS, an open-source framework which enables creating and provisioning manifold mobile remote controls as plain web applications. We describe the basic architecture of ATREUS and present four functional remote controls realized using the framework. Two sophisticated controls, the Mini Video and the Smart Lens approach, had previously been implemented as native applications only. Furthermore, we report on lessons learned in realizing web-based remote controls during functional tests and finally present the results of an informal user study.
@InProceedings{baldauf2016perdis,
author = {Baldauf, Matthias and Adegeye, Florence and Alt, Florian and Harms, Johannes},
title = {Your Browser is the Controller: Advanced Web-based Smartphone Remote Controls for Public Screens},
booktitle = {Proceedings of the 5th ACM International Symposium on Pervasive Displays},
year = {2016},
series = {PerDis '16},
pages = {175--181},
address = {New York, NY, USA},
publisher = {ACM},
note = {baldauf2016perdis},
abstract = {In recent years, much research has focused on using smartphones as input devices for distant screens, in many cases by means of native applications. At the same time, prior work often ignored the downsides of native applications for practical usage, such as the need for a download and the required installation process. This hampers the spontaneous use of an interactive service. To address the aforementioned drawbacks, we introduce ATREUS, an open-source framework which enables creating and provisioning manifold mobile remote controls as plain web applications. We describe the basic architecture of ATREUS and present four functional remote controls realized using the framework. Two sophisticated controls, the Mini Video and the Smart Lens approach, had previously been implemented as native applications only. Furthermore, we report on lessons learned in realizing web-based remote controls during functional tests and finally present the results of an informal user study.},
acmid = {2915026},
doi = {10.1145/2914920.2915026},
isbn = {978-1-4503-4366-4},
keywords = {interaction, public display, remote control, smartphone},
location = {Oulu, Finland},
numpages = {7},
timestamp = {2016.06.20},
url = {http://www.florian-alt.org/unibw/wp-content/publications/baldauf2016perdis.pdf},
}
N. Broy, V. Lindner, and F. Alt, “The S3D-UI Designer: Creating User Interface Prototypes for 3D Displays,” in Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia, New York, NY, USA, 2016, p. 49–55. doi:10.1145/3012709.3012727
[BibTeX] [Abstract] [Download PDF]
In this paper, we present the S3D-UI Designer – a tool to create prototypes for 3D displays. Stereoscopic (S3D) displays are quickly becoming popular beyond cinemas and home entertainment. S3D displays can today already be found in mobile phones, public displays, and car dashboards. A benefit of such UIs is that they can group and structure information in a way that makes it easily perceivable for the user. At the same time, prototyping these UIs is challenging, as with traditional techniques, UI elements cannot easily be rendered and positioned in 3D space. In contrast to professional 3D authoring tools, we present a tool targeted towards non-experts to quickly and easily sketch an S3D UI and instantly render it on a 3D display. We report on the design of the tool by means of a workshop and present an evaluation study with 26 participants assessing its usability.
@InProceedings{broy2016mum,
author = {Broy, Nora and Lindner, Verena and Alt, Florian},
title = {The S3D-UI Designer: Creating User Interface Prototypes for 3D Displays},
booktitle = {Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia},
year = {2016},
series = {MUM '16},
pages = {49--55},
address = {New York, NY, USA},
publisher = {ACM},
note = {broy2016mum},
abstract = {In this paper, we present the S3D-UI Designer --- a tool to create prototypes for 3D displays. Stereoscopic (S3D) displays are quickly becoming popular beyond cinemas and home entertainment. S3D displays can today already be found in mobile phones, public displays, and car dashboards. A benefit of such UIs is that they can group and structure information in a way that makes it easily perceivable for the user. At the same time, prototyping these UIs is challenging, as with traditional techniques, UI elements cannot easily be rendered and positioned in 3D space. In contrast to professional 3D authoring tools, we present a tool targeted towards non-experts to quickly and easily sketch an S3D UI and instantly render it on a 3D display. We report on the design of the tool by means of a workshop and present an evaluation study with 26 participants assessing its usability.},
acmid = {3012727},
doi = {10.1145/3012709.3012727},
isbn = {978-1-4503-4860-7},
keywords = {prototyping, stereoscopic 3D, user interfaces},
location = {Rovaniemi, Finland},
numpages = {7},
timestamp = {2016.12.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2016mum.pdf},
}
D. Buschek, A. De Luca, and F. Alt, “Evaluating the Influence of Targets and Hand Postures on Touch-based Behavioural Biometrics,” in Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems, New York, NY, USA, 2016, p. 1349–1361. doi:10.1145/2858036.2858165
[BibTeX] [Abstract] [Download PDF]
Users’ individual differences in their mobile touch behaviour can help to continuously verify identity and protect personal data. However, little is known about the influence of GUI elements and hand postures on such touch biometrics. Thus, we present a metric to measure the amount of user-revealing information that can be extracted from touch targeting interactions and apply it in eight targeting tasks with over 150,000 touches from 24 users in two sessions. We compare touch-to-target offset patterns for four target types and two hand postures. Our analyses reveal that small, compactly shaped targets near screen edges yield the most descriptive touch targeting patterns. Moreover, our results show that thumb touches are more individual than index finger ones. We conclude that touch-based user identification systems should analyse GUI layouts and infer hand postures. We also describe a framework to estimate the usefulness of GUIs for touch biometrics.
@InProceedings{buschek2016chi2,
author = {Buschek, Daniel and De Luca, Alexander and Alt, Florian},
title = {Evaluating the Influence of Targets and Hand Postures on Touch-based Behavioural Biometrics},
booktitle = {Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems},
year = {2016},
series = {CHI '16},
pages = {1349--1361},
address = {New York, NY, USA},
publisher = {ACM},
note = {buschek2016chi2},
abstract = {Users' individual differences in their mobile touch behaviour can help to continuously verify identity and protect personal data. However, little is known about the influence of GUI elements and hand postures on such touch biometrics. Thus, we present a metric to measure the amount of user-revealing information that can be extracted from touch targeting interactions and apply it in eight targeting tasks with over 150,000 touches from 24 users in two sessions. We compare touch-to-target offset patterns for four target types and two hand postures. Our analyses reveal that small, compactly shaped targets near screen edges yield the most descriptive touch targeting patterns. Moreover, our results show that thumb touches are more individual than index finger ones. We conclude that touch-based user identification systems should analyse GUI layouts and infer hand postures. We also describe a framework to estimate the usefulness of GUIs for touch biometrics.},
acmid = {2858165},
doi = {10.1145/2858036.2858165},
isbn = {978-1-4503-3362-7},
keywords = {behavioural biometrics, mobile device, touch targeting},
location = {San Jose, California, USA},
numpages = {13},
timestamp = {2016.05.13},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2016chi2.pdf},
}
D. Buschek, F. Hartmann, E. von Zezschwitz, A. De Luca, and F. Alt, “SnapApp: Reducing Authentication Overhead with a Time-Constrained Fast Unlock Option,” in Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems, New York, NY, USA, 2016, p. 3736–3747. doi:10.1145/2858036.2858164
[BibTeX] [Abstract] [Download PDF]
We present SnapApp, a novel unlock concept for mobile devices that reduces authentication overhead with a time-constrained quick-access option. SnapApp provides two unlock methods at once: While PIN entry enables full access to the device, users can also bypass authentication with a short sliding gesture (“Snap”). This grants access for a limited amount of time (e.g. 30 seconds). The device then automatically locks itself upon expiration. Our concept further explores limiting the possible number of Snaps in a row, and configuring blacklists for app use during short access (e.g. to exclude banking apps). We discuss opportunities and challenges of this concept based on a 30-day field study with 18 participants, including data logging and experience sampling methods. Snaps significantly reduced unlock times, and our app was perceived to offer a good tradeoff. Conceptual challenges include, for example, supporting users in configuring their blacklists.
@InProceedings{buschek2016chi1,
author = {Buschek, Daniel and Hartmann, Fabian and von Zezschwitz, Emanuel and De Luca, Alexander and Alt, Florian},
title = {SnapApp: Reducing Authentication Overhead with a Time-Constrained Fast Unlock Option},
booktitle = {Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems},
year = {2016},
series = {CHI '16},
pages = {3736--3747},
address = {New York, NY, USA},
publisher = {ACM},
note = {buschek2016chi1},
abstract = {We present SnapApp, a novel unlock concept for mobile devices that reduces authentication overhead with a time-constrained quick-access option. SnapApp provides two unlock methods at once: While PIN entry enables full access to the device, users can also bypass authentication with a short sliding gesture ("Snap"). This grants access for a limited amount of time (e.g. 30 seconds). The device then automatically locks itself upon expiration. Our concept further explores limiting the possible number of Snaps in a row, and configuring blacklists for app use during short access (e.g. to exclude banking apps). We discuss opportunities and challenges of this concept based on a 30-day field study with 18 participants, including data logging and experience sampling methods. Snaps significantly reduced unlock times, and our app was perceived to offer a good tradeoff. Conceptual challenges include, for example, supporting users in configuring their blacklists.},
acmid = {2858164},
doi = {10.1145/2858036.2858164},
isbn = {978-1-4503-3362-7},
keywords = {smartphone authentication, time-constrained device access, usable privacy and security},
location = {San Jose, California, USA},
numpages = {12},
timestamp = {2016.05.13},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2016chi1.pdf},
}
R. Haeuslschmid, B. Pfleging, and F. Alt, “A Design Space to Support the Development of Windshield Applications for the Car,” in Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems, New York, NY, USA, 2016, p. 5076–5091. doi:10.1145/2858036.2858336
[BibTeX] [Abstract] [Download PDF]
In this paper we present a design space for interactive windshield displays in vehicles and discuss how this design space can support designers in creating windshield applications for drivers, passengers, and pedestrians. Our work is motivated by numerous examples in other HCI-related areas where seminal design space papers served as a valuable basis to evolve the respective field – most notably mobile devices, automotive user interfaces, and interactive public displays. The presented design space is based on a comprehensive literature review. Furthermore we present a classification of 211 windshield applications, derived from a survey of research projects and commercial products as well as from focus groups. We showcase the utility of our work for designers of windshield applications through two scenarios. Overall, our design space can help building applications for diverse use cases. This includes apps inside and outside the car as well as applications for specific areas (fire fighters, police, ambulance).
@InProceedings{haeuslschmid2016chi,
author = {Haeuslschmid, Renate and Pfleging, Bastian and Alt, Florian},
title = {A Design Space to Support the Development of Windshield Applications for the Car},
booktitle = {Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems},
year = {2016},
series = {CHI '16},
pages = {5076--5091},
address = {New York, NY, USA},
publisher = {ACM},
note = {haeuslschmid2016chi},
abstract = {In this paper we present a design space for interactive windshield displays in vehicles and discuss how this design space can support designers in creating windshield applications for drivers, passengers, and pedestrians. Our work is motivated by numerous examples in other HCI-related areas where seminal design space papers served as a valuable basis to evolve the respective field -- most notably mobile devices, automotive user interfaces, and interactive public displays. The presented design space is based on a comprehensive literature review. Furthermore we present a classification of 211 windshield applications, derived from a survey of research projects and commercial products as well as from focus groups. We showcase the utility of our work for designers of windshield applications through two scenarios. Overall, our design space can help building applications for diverse use cases. This includes apps inside and outside the car as well as applications for specific areas (fire fighters, police, ambulance).},
acmid = {2858336},
doi = {10.1145/2858036.2858336},
isbn = {978-1-4503-3362-7},
keywords = {automotive interfaces, design space, head-up display, in-vehicle interfaces, windshield display},
location = {San Jose, California, USA},
numpages = {16},
timestamp = {2016.05.13},
url = {http://www.florian-alt.org/unibw/wp-content/publications/haeuslschmid2016chi.pdf},
}
M. Hassib, M. Khamis, S. Schneegass, A. S. Shirazi, and F. Alt, “Investigating User Needs for Bio-sensing and Affective Wearables,” in Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems, New York, NY, USA, 2016, p. 1415–1422. doi:10.1145/2851581.2892480
[BibTeX] [Abstract] [Download PDF]
Bio-sensing wearables are currently advancing to provide users with a wealth of information about their physiological and affective states. However, relatively little is known about users’ interest in acquiring, sharing, and receiving this information, and through which channels and modalities. To close this gap, we report on the results of an online survey (N=109) exploring principal aspects of the design space of wearables, such as data types, contexts, feedback modalities, and sharing behaviors. Results show that users are interested in obtaining physiological, emotional, and cognitive data through modalities beyond traditional touchscreen output. The valence of the information, whether positive or negative, affects sharing behaviors.
@InProceedings{hassib2016chiea,
author = {Hassib, Mariam and Khamis, Mohamed and Schneegass, Stefan and Shirazi, Ali Sahami and Alt, Florian},
title = {Investigating User Needs for Bio-sensing and Affective Wearables},
booktitle = {Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems},
year = {2016},
series = {CHI EA '16},
pages = {1415--1422},
address = {New York, NY, USA},
publisher = {ACM},
note = {hassib2016chiea},
abstract = {Bio-sensing wearables are currently advancing to provide users with a wealth of information about their physiological and affective states. However, relatively little is known about users' interest in acquiring, sharing, and receiving this information, and through which channels and modalities. To close this gap, we report on the results of an online survey (N=109) exploring principal aspects of the design space of wearables, such as data types, contexts, feedback modalities, and sharing behaviors. Results show that users are interested in obtaining physiological, emotional, and cognitive data through modalities beyond traditional touchscreen output. The valence of the information, whether positive or negative, affects sharing behaviors.},
acmid = {2892480},
doi = {10.1145/2851581.2892480},
isbn = {978-1-4503-4082-3},
keywords = {cognition, emotion, physiological sensing, wearables},
location = {San Jose, California, USA},
numpages = {8},
timestamp = {2016.05.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/hassib2016chiea.pdf},
}
M. Khamis, F. Alt, and A. Bulling, “Challenges and Design Space of Gaze-enabled Public Displays,” in Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing, New York, NY, USA, 2016. doi:10.1145/2968219.2968342
[BibTeX] [Abstract] [Download PDF]
Gaze is an attractive modality for public displays; hence, recent years have seen an increase in deployments of gaze-enabled public displays. Although gaze has been thoroughly investigated for desktop scenarios, gaze-enabled public displays present new challenges that are unique to this setup. In contrast to desktop settings, public displays (1) cannot afford requiring eye tracker calibration, (2) expect users to interact from different positions, and (3) expect multiple users to interact simultaneously. In this work we discuss these challenges and explore the design space of gaze-enabled public displays. We conclude by discussing how the current state of research stands with respect to the identified challenges, and highlight directions for future work.
@InProceedings{khamis2016petmei,
author = {Khamis, Mohamed and Alt, Florian and Bulling, Andreas},
title = {Challenges and Design Space of Gaze-enabled Public Displays},
booktitle = {Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing},
year = {2016},
series = {PETMEI '16},
address = {New York, NY, USA},
publisher = {ACM},
note = {khamis2016petmei},
abstract = {Gaze is an attractive modality for public displays; hence, recent years have seen an increase in deployments of gaze-enabled public displays. Although gaze has been thoroughly investigated for desktop scenarios, gaze-enabled public displays present new challenges that are unique to this setup. In contrast to desktop settings, public displays (1) cannot afford requiring eye tracker calibration, (2) expect users to interact from different positions, and (3) expect multiple users to interact simultaneously. In this work we discuss these challenges and explore the design space of gaze-enabled public displays. We conclude by discussing how the current state of research stands with respect to the identified challenges, and highlight directions for future work.},
doi = {10.1145/2968219.2968342},
location = {Heidelberg, Germany},
numpages = {10},
timestamp = {2016.09.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2016petmei.pdf},
}
M. Khamis, F. Alt, M. Hassib, E. von Zezschwitz, R. Hasholzner, and A. Bulling, “GazeTouchPass: Multimodal Authentication Using Gaze and Touch on Mobile Devices,” in Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems, New York, NY, USA, 2016, p. 2156–2164. doi:10.1145/2851581.2892314
[BibTeX] [Abstract] [Download PDF]
We propose a multimodal scheme, GazeTouchPass, that combines gaze and touch for shoulder-surfing resistant user authentication on mobile devices. GazeTouchPass allows passwords with multiple switches between input modalities during authentication. This requires attackers to simultaneously observe the device screen and the user’s eyes to find the password. We evaluate the security and usability of GazeTouchPass in two user studies. Our findings show that GazeTouchPass is usable and significantly more secure than single-modal authentication against basic and even advanced shoulder-surfing attacks.
@InProceedings{khamis2016chiea,
author = {Khamis, Mohamed and Alt, Florian and Hassib, Mariam and von Zezschwitz, Emanuel and Hasholzner, Regina and Bulling, Andreas},
title = {GazeTouchPass: Multimodal Authentication Using Gaze and Touch on Mobile Devices},
booktitle = {Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems},
year = {2016},
series = {CHI EA '16},
pages = {2156--2164},
address = {New York, NY, USA},
publisher = {ACM},
note = {khamis2016chiea},
abstract = {We propose a multimodal scheme, GazeTouchPass, that combines gaze and touch for shoulder-surfing resistant user authentication on mobile devices. GazeTouchPass allows passwords with multiple switches between input modalities during authentication. This requires attackers to simultaneously observe the device screen and the user's eyes to find the password. We evaluate the security and usability of GazeTouchPass in two user studies. Our findings show that GazeTouchPass is usable and significantly more secure than single-modal authentication against basic and even advanced shoulder-surfing attacks.},
acmid = {2892314},
doi = {10.1145/2851581.2892314},
isbn = {978-1-4503-4082-3},
keywords = {gaze gestures, mobile devices, multimodal authentication},
location = {San Jose, California, USA},
numpages = {9},
timestamp = {2016.05.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2016chiea.pdf},
}
M. Khamis, O. Saltuk, A. Hang, K. Stolz, A. Bulling, and F. Alt, “TextPursuits: Using Text for Pursuits-based Interaction and Calibration on Public Displays,” in Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing, New York, NY, USA, 2016, p. 274–285. doi:10.1145/2971648.2971679
[BibTeX] [Abstract] [Download PDF]
In this paper we show how reading text on large displays can be used to enable gaze interaction in public space. Our research is motivated by the fact that much of the content on public displays includes text. Hence, researchers and practitioners could greatly benefit from users being able to spontaneously interact as well as to implicitly calibrate an eye tracker while simply reading this text. In particular, we adapt Pursuits, a technique that correlates users’ eye movements with moving on-screen targets. While prior work used abstract objects or dots as targets, we explore the use of Pursuits with text (read-and-pursue). Thereby we address the challenge that eye movements performed while reading interfere with the pursuit movements. Results from two user studies (N=37) show that Pursuits with text is feasible and can achieve accuracy similar to non-text-based pursuit approaches. While calibration is less accurate, it integrates smoothly with reading and allows areas of the display the user is looking at to be identified.
@InProceedings{khamis2016ubicomp,
author = {Khamis, Mohamed and Saltuk, Ozan and Hang, Alina and Stolz, Katharina and Bulling, Andreas and Alt, Florian},
title = {TextPursuits: Using Text for Pursuits-based Interaction and Calibration on Public Displays},
booktitle = {Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing},
year = {2016},
series = {UbiComp '16},
pages = {274--285},
address = {New York, NY, USA},
publisher = {ACM},
note = {khamis2016ubicomp},
abstract = {In this paper we show how reading text on large displays can be used to enable gaze interaction in public space. Our research is motivated by the fact that much of the content on public displays includes text. Hence, researchers and practitioners could greatly benefit from users being able to spontaneously interact as well as to implicitly calibrate an eye tracker while simply reading this text. In particular, we adapt Pursuits, a technique that correlates users' eye movements with moving on-screen targets. While prior work used abstract objects or dots as targets, we explore the use of Pursuits with text (read-and-pursue). Thereby we address the challenge that eye movements performed while reading interfere with the pursuit movements. Results from two user studies (N=37) show that Pursuits with text is feasible and can achieve accuracy similar to non-text-based pursuit approaches. While calibration is less accurate, it integrates smoothly with reading and allows areas of the display the user is looking at to be identified.},
acmid = {2971679},
doi = {10.1145/2971648.2971679},
isbn = {978-1-4503-4461-6},
keywords = {gaze interaction, public displays, smooth pursuit, text},
location = {Heidelberg, Germany},
numpages = {12},
timestamp = {2016.09.18},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2016ubicomp.pdf},
}
M. Khamis, L. Trotter, M. Tessmann, C. Dannhart, A. Bulling, and F. Alt, “EyeVote in the Wild: Do Users Bother Correcting System Errors on Public Displays?,” in Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia, New York, NY, USA, 2016, p. 57–62. doi:10.1145/3012709.3012743
[BibTeX] [Abstract] [Download PDF]
Although recovering from errors is straightforward on most interfaces, public display systems pose unique design challenges. Namely, public display users interact for very short amounts of time and are believed to abandon the display when interrupted or forced to deviate from the main task. To date, it is not well understood whether public display designers should enable users to correct errors (e.g. by asking users to confirm or giving them a chance to correct their input), or aim for faster interaction and rely on other types of feedback to estimate errors. To close this gap, we conducted a field study where we investigated users’ willingness to correct their input on public displays. We report on our findings from an in-the-wild deployment of a public gaze-based voting system where we intentionally evoked system errors to see if users correct them. We found that public display users are willing to correct system errors provided that the correction is fast and straightforward. We discuss how our findings influence the choice of interaction methods for public displays; interaction methods that are highly usable but suffer from low accuracy can still be effective if users can “undo” their interactions.
@InProceedings{khamis2016mum,
author = {Khamis, Mohamed and Trotter, Ludwig and Tessmann, Markus and Dannhart, Christina and Bulling, Andreas and Alt, Florian},
title = {EyeVote in the Wild: Do Users Bother Correcting System Errors on Public Displays?},
booktitle = {Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia},
year = {2016},
series = {MUM '16},
pages = {57--62},
address = {New York, NY, USA},
publisher = {ACM},
note = {khamis2016mum},
abstract = {Although recovering from errors is straightforward on most interfaces, public display systems pose unique design challenges. Namely, public display users interact for very short amounts of time and are believed to abandon the display when interrupted or forced to deviate from the main task. To date, it is not well understood whether public display designers should enable users to correct errors (e.g. by asking users to confirm or giving them a chance to correct their input), or aim for faster interaction and rely on other types of feedback to estimate errors. To close this gap, we conducted a field study where we investigated users' willingness to correct their input on public displays. We report on our findings from an in-the-wild deployment of a public gaze-based voting system where we intentionally evoked system errors to see if users correct them. We found that public display users are willing to correct system errors provided that the correction is fast and straightforward. We discuss how our findings influence the choice of interaction methods for public displays; interaction methods that are highly usable but suffer from low accuracy can still be effective if users can "undo" their interactions.},
acmid = {3012743},
doi = {10.1145/3012709.3012743},
isbn = {978-1-4503-4860-7},
keywords = {gaze interaction, public displays, smooth pursuit, voting},
location = {Rovaniemi, Finland},
numpages = {6},
timestamp = {2016.12.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2016mum.pdf},
}
S. Schneegass, S. Ogando, and F. Alt, “Using On-body Displays for Extending the Output of Wearable Devices,” in Proceedings of the 5th ACM International Symposium on Pervasive Displays, New York, NY, USA, 2016, p. 67–74. doi:10.1145/2914920.2915021
[BibTeX] [Abstract] [Download PDF]
In this work, we explore wearable on-body displays. These displays have the potential of extending the display space of smart watches to the user’s body. Our research is motivated by wearable computing devices moving technology closer to the human. Today, smart watches offer functionality similar to smartphones, yet at a smaller form factor. To cope with the limited display real estate, we propose to use on-body displays integrated with clothing to extend the available display space. We present a design space for on-body displays and explore users’ location and visualization preferences. We also report on the design and implementation of a prototypical display system. We evaluated the prototype in a lab study with 16 participants, showing that on-body displays perform similarly to current off-screen visualization techniques.
@InProceedings{schneegass2016perdis,
author = {Schneegass, Stefan and Ogando, Sophie and Alt, Florian},
title = {Using On-body Displays for Extending the Output of Wearable Devices},
booktitle = {Proceedings of the 5th ACM International Symposium on Pervasive Displays},
year = {2016},
series = {PerDis '16},
pages = {67--74},
address = {New York, NY, USA},
publisher = {ACM},
note = {schneegass2016perdis},
abstract = {In this work, we explore wearable on-body displays. These displays have the potential of extending the display space of smart watches to the user's body. Our research is motivated by wearable computing devices moving technology closer to the human. Today, smart watches offer functionality similar to smartphones, yet at a smaller form factor. To cope with the limited display real estate, we propose to use on-body displays integrated with clothing to extend the available display space. We present a design space for on-body displays and explore users' location and visualization preferences. We also report on the design and implementation of a prototypical display system. We evaluated the prototype in a lab study with 16 participants, showing that on-body displays perform similarly to current off-screen visualization techniques.},
acmid = {2915021},
doi = {10.1145/2914920.2915021},
isbn = {978-1-4503-4366-4},
keywords = {focus + context, on-body display, smart textiles, wearable computing},
location = {Oulu, Finland},
numpages = {8},
timestamp = {2016.06.20},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schneegass2016perdis.pdf},
}
H. Schneider, K. Moser, A. Butz, and F. Alt, “Understanding the Mechanics of Persuasive System Design: A Mixed-Method Theory-driven Analysis of Freeletics,” in Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems, New York, NY, USA, 2016, p. 309–320. doi:10.1145/2858036.2858290
[BibTeX] [Abstract] [Download PDF]
While we know that persuasive system design matters, we barely understand when persuasive strategies work and why they only work in some cases. We propose an approach to systematically understand and design for motivation by studying the fundamental building blocks of motivation according to the theory of planned behavior (TPB): attitude, subjective norm, and perceived control. We quantitatively analyzed (N=643) the attitudes, beliefs, and values of mobile fitness coach users with TPB. Capacity (i.e., perceived ability to exercise) had the biggest effect on users’ motivation. Using individual differences theory, we identified three distinct user groups, namely followers, hedonists, and achievers. Drawing on insights from semi-structured interviews (N=5), we derive design implications, finding that transformation videos featuring other users’ success stories, as well as suggestions for an appropriate workout, can have positive effects on perceived capacity. Practitioners and researchers can use our theory-based mixed-method research design to better understand user behavior in persuasive applications.
@InProceedings{schneider2016chi,
author = {Schneider, Hanna and Moser, Kilian and Butz, Andreas and Alt, Florian},
title = {Understanding the Mechanics of Persuasive System Design: A Mixed-Method Theory-driven Analysis of Freeletics},
booktitle = {Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems},
year = {2016},
series = {CHI '16},
pages = {309--320},
address = {New York, NY, USA},
publisher = {ACM},
note = {schneider2016chi},
abstract = {While we know that persuasive system design matters, we barely understand when persuasive strategies work and why they only work in some cases. We propose an approach to systematically understand and design for motivation by studying the fundamental building blocks of motivation according to the theory of planned behavior (TPB): attitude, subjective norm, and perceived control. We quantitatively analyzed (N=643) the attitudes, beliefs, and values of mobile fitness coach users with TPB. Capacity (i.e., perceived ability to exercise) had the biggest effect on users' motivation. Using individual differences theory, we identified three distinct user groups, namely followers, hedonists, and achievers. Drawing on insights from semi-structured interviews (N=5), we derive design implications, finding that transformation videos featuring other users' success stories, as well as suggestions for an appropriate workout, can have positive effects on perceived capacity. Practitioners and researchers can use our theory-based mixed-method research design to better understand user behavior in persuasive applications.},
acmid = {2858290},
doi = {10.1145/2858036.2858290},
isbn = {978-1-4503-3362-7},
keywords = {behavior change, fitness application, personal values, persuasive technology, theory of planned behavior},
location = {San Jose, California, USA},
numpages = {12},
timestamp = {2016.05.12},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schneider2016chi.pdf},
}
J. Shi and F. Alt, “The Anonymous Audience Analyzer: Visualizing Audience Behavior in Public Space,” in Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems, New York, NY, USA, 2016, p. 3766–3769. doi:10.1145/2851581.2890256
[BibTeX] [Abstract] [Download PDF]
With dropping hardware prices, an increasing number of interactive displays is being deployed in public space. To investigate and understand the impact of novel interaction techniques, content, and display properties, researchers and practitioners alike rely on observations of the audience. While in-situ observations are costly in terms of time and effort, video data allows situations in front of the display to be analyzed post-hoc. In many situations, however, video recordings are not possible since the privacy of users needs to be protected. To address this challenge, we present a tool that allows scenes in front of a display to be reconstructed from Kinect data (user position and body posture) and visualized in a virtual environment. In this way, the privacy of the audience can be preserved while allowing display owners to run in-depth investigations of their display installations.
@InProceedings{shi2016chidemo,
author = {Shi, Jiamin and Alt, Florian},
title = {The Anonymous Audience Analyzer: Visualizing Audience Behavior in Public Space},
booktitle = {Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems},
year = {2016},
series = {CHI EA '16},
pages = {3766--3769},
address = {New York, NY, USA},
publisher = {ACM},
note = {shi2016chidemo},
abstract = {With dropping hardware prices, an increasing number of interactive displays is being deployed in public space. To investigate and understand the impact of novel interaction techniques, content, and display properties, researchers and practitioners alike rely on observations of the audience. While in-situ observations are costly in terms of time and effort, video data allows situations in front of the display to be analyzed post-hoc. In many situations, however, video recordings are not possible since the privacy of users needs to be protected. To address this challenge, we present a tool that allows scenes in front of a display to be reconstructed from Kinect data (user position and body posture) and visualized in a virtual environment. In this way, the privacy of the audience can be preserved while allowing display owners to run in-depth investigations of their display installations.},
acmid = {2890256},
doi = {10.1145/2851581.2890256},
isbn = {978-1-4503-4082-3},
keywords = {audience behaviour, public displays, virtual reality},
location = {San Jose, California, USA},
numpages = {4},
timestamp = {2016.04.23},
url = {http://www.florian-alt.org/unibw/wp-content/publications/shi2016chidemo.pdf},
}
J. Shi, D. Buschek, and F. Alt, “Investigating the Impact of Feedback on Gaming Performance on Motivation to Interact with Public Displays,” in Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems, New York, NY, USA, 2016, p. 1344–1351. doi:10.1145/2851581.2892465
[BibTeX] [Abstract] [Download PDF]
This paper investigates the influence of feedback about users’ performance on their motivation as they interact with games on displays in public space. Our research is motivated by the fact that games are popular among both researchers and practitioners, due to their ability to attract many users. However, it is largely unclear which factors impact how much people play and whether they leave personal information on the display. We investigate different forms of feedback (highscore, real-time score, and real-time rank during gameplay) and report on how they influence the behavior of users. Our results are based on data from the deployment of an interactive game in a public space.
@InProceedings{shi2016chiea,
author = {Shi, Jiamin and Buschek, Daniel and Alt, Florian},
title = {Investigating the Impact of Feedback on Gaming Performance on Motivation to Interact with Public Displays},
booktitle = {Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems},
year = {2016},
series = {CHI EA '16},
pages = {1344--1351},
address = {New York, NY, USA},
publisher = {ACM},
note = {shi2016chiea},
abstract = {This paper investigates the influence of feedback about users' performance on their motivation as they interact with games on displays in public space. Our research is motivated by the fact that games are popular among both researchers and practitioners, due to their ability to attract many users. However, it is largely unclear which factors impact how much people play and whether they leave personal information on the display. We investigate different forms of feedback (highscore, real-time score, and real-time rank during gameplay) and report on how they influence the behavior of users. Our results are based on data from the deployment of an interactive game in a public space.},
acmid = {2892465},
doi = {10.1145/2851581.2892465},
isbn = {978-1-4503-4082-3},
keywords = {competition, motivation, public displays, user performance},
location = {San Jose, California, USA},
numpages = {8},
timestamp = {2016.05.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/shi2016chiea.pdf},
}
F. Steinberger, P. Proppe, R. Schroeter, and F. Alt, “CoastMaster: An Ambient Speedometer to Gamify Safe Driving,” in Proceedings of the 8th International Conference on Automotive User Interfaces and Interactive Vehicular Applications, New York, NY, USA, 2016, p. 83–90. doi:10.1145/3003715.3005412
[BibTeX] [Abstract] [Download PDF]
We present CoastMaster, a smartphone application that serves as an ambient speedometer and driving game display. Our work is motivated by the need to re-engage drivers in the driving task, e.g., in situations where manoeuvring the vehicle is straightforward and does not require high levels of engagement. CoastMaster supports drivers during speed limit changes by (a) re-engaging them in the driving task and (b) providing feedback on driving behaviour. In a simulator study (N=24), we compare a gamified and a non-gamified interface with regard to user experience, driving performance, and visual distraction. Our results indicate an increase in hedonic quality and driver engagement as well as a decrease in speed violations in the gamified condition. At the same time, the gamified version leads to longer glances towards the display, suggesting visual distraction. Our study findings inform specific design recommendations for ambient interfaces and gamified driving.
@InProceedings{steinberger2016autoui,
author = {Steinberger, Fabius and Proppe, Patrick and Schroeter, Ronald and Alt, Florian},
title = {CoastMaster: An Ambient Speedometer to Gamify Safe Driving},
booktitle = {Proceedings of the 8th International Conference on Automotive User Interfaces and Interactive Vehicular Applications},
year = {2016},
series = {Automotive'UI 16},
pages = {83--90},
address = {New York, NY, USA},
publisher = {ACM},
note = {steinberger2016autoui},
abstract = {We present CoastMaster, a smartphone application that serves as an ambient speedometer and driving game display. Our work is motivated by the need to re-engage drivers in the driving task, e.g., in situations where manoeuvring the vehicle is straightforward and does not require high levels of engagement. CoastMaster supports drivers during speed limit changes by (a) re-engaging them in the driving task and (b) providing feedback on driving behaviour. In a simulator study (N=24), we compare a gamified and a non-gamified interface with regard to user experience, driving performance, and visual distraction. Our results indicate an increase in hedonic quality and driver engagement as well as a decrease in speed violations in the gamified condition. At the same time, the gamified version leads to longer glances towards the display, suggesting visual distraction. Our study findings inform specific design recommendations for ambient interfaces and gamified driving.},
acmid = {3005412},
doi = {10.1145/3003715.3005412},
isbn = {978-1-4503-4533-0},
keywords = {Ambient interface, design approach, distraction, gamification, interactive experience, vehicle-based apps},
location = {Ann Arbor, MI, USA},
numpages = {8},
timestamp = {2016.10.10},
url = {http://www.florian-alt.org/unibw/wp-content/publications/steinberger2016autoui.pdf},
}
E. von Zezschwitz, M. Eiband, D. Buschek, S. Oberhuber, A. De Luca, F. Alt, and H. Hussmann, “On Quantifying the Effective Password Space of Grid-based Unlock Gestures,” in Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia, New York, NY, USA, 2016, p. 201–212. doi:10.1145/3012709.3012729
[BibTeX] [Abstract] [Download PDF]
We present a similarity metric for Android unlock patterns to quantify the effective password space of user-defined gestures. Our metric is the first of its kind to reflect that users choose patterns based on human intuition and interest in geometric properties of the resulting shapes. Applying our metric to a dataset of 506 user-defined patterns reveals very similar shapes that only differ by simple geometric transformations such as rotation. This shrinks the effective password space by 66% and allows informed guessing attacks. Consequently, we present an approach to subtly nudge users to create more diverse patterns by showing background images and animations during pattern creation. Results from a user study (n = 496) show that applying such countermeasures can significantly increase pattern diversity. We conclude with implications for pattern choices and the design of enrollment processes.
@InProceedings{zezschwitz2016mum,
author = {von Zezschwitz, Emanuel and Eiband, Malin and Buschek, Daniel and Oberhuber, Sascha and De Luca, Alexander and Alt, Florian and Hussmann, Heinrich},
title = {On Quantifying the Effective Password Space of Grid-based Unlock Gestures},
booktitle = {Proceedings of the 15th International Conference on Mobile and Ubiquitous Multimedia},
year = {2016},
series = {MUM '16},
pages = {201--212},
address = {New York, NY, USA},
publisher = {ACM},
note = {zezschwitz2016mum},
abstract = {We present a similarity metric for Android unlock patterns to quantify the effective password space of user-defined gestures. Our metric is the first of its kind to reflect that users choose patterns based on human intuition and interest in geometric properties of the resulting shapes. Applying our metric to a dataset of 506 user-defined patterns reveals very similar shapes that only differ by simple geometric transformations such as rotation. This shrinks the effective password space by 66% and allows informed guessing attacks. Consequently, we present an approach to subtly nudge users to create more diverse patterns by showing background images and animations during pattern creation. Results from a user study (n = 496) show that applying such countermeasures can significantly increase pattern diversity. We conclude with implications for pattern choices and the design of enrollment processes.},
acmid = {3012729},
doi = {10.1145/3012709.3012729},
isbn = {978-1-4503-4860-7},
keywords = {metric, password space, security, similarity, unlock pattern, user selection},
location = {Rovaniemi, Finland},
numpages = {12},
timestamp = {2016.12.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/zezschwitz2016mum.pdf},
}
M. Khamis, A. Klimczak, M. Reiss, F. Alt, and A. Bulling, “EyeScout: Active Eye Tracking for Position- and Movement-Independent Gaze Interaction with Large Public Displays,” in Proceedings of the 30th Annual ACM Symposium on User Interface Software & Technology, New York, NY, USA, 2017. doi:10.1145/3126594.3126630
[BibTeX] [Abstract] [Download PDF]
While gaze holds a lot of promise for hands-free interaction with public displays, remote eye trackers with their confined tracking box restrict users to a single stationary position in front of the display. We present EyeScout, an active eye tracking system that combines an eye tracker mounted on a rail system with a computational method to automatically detect and align the tracker with the user’s lateral movement. EyeScout addresses key limitations of current gaze-enabled large public displays by offering two novel gaze-interaction modes for a single user: In “Walk then Interact” the user can walk up to an arbitrary position in front of the display and interact, while in “Walk and Interact” the user can interact even while on the move. We report on a user study that shows that EyeScout is well perceived by users, extends a public display’s sweet spot into a sweet line, and reduces gaze interaction kick-off time to 3.5 seconds – a 62% improvement over state-of-the-art solutions. We discuss sample applications that demonstrate how EyeScout can enable position- and movement-independent gaze interaction with large public displays.
@InProceedings{khamis2017uist,
author = {Khamis, Mohamed and Klimczak, Alexander and Reiss, Martin and Alt, Florian and Bulling, Andreas},
title = {EyeScout: Active Eye Tracking for Position- and Movement-Independent Gaze Interaction with Large Public Displays},
booktitle = {Proceedings of the 30th Annual ACM Symposium on User Interface Software \& Technology},
year = {2017},
series = {UIST '17},
address = {New York, NY, USA},
publisher = {ACM},
note = {khamis2017uist},
abstract = {While gaze holds a lot of promise for hands-free interaction with public displays, remote eye trackers with their confined tracking box restrict users to a single stationary position in front of the display. We present EyeScout, an active eye tracking system that combines an eye tracker mounted on a rail system with a computational method to automatically detect and align the tracker with the user's lateral movement. EyeScout addresses key limitations of current gaze-enabled large public displays by offering two novel gaze-interaction modes for a single user: In "Walk then Interact" the user can walk up to an arbitrary position in front of the display and interact, while in "Walk and Interact" the user can interact even while on the move. We report on a user study that shows that EyeScout is well perceived by users, extends a public display's sweet spot into a sweet line, and reduces gaze interaction kick-off time to 3.5 seconds -- a 62% improvement over state-of-the-art solutions. We discuss sample applications that demonstrate how EyeScout can enable position- and movement-independent gaze interaction with large public displays.},
acmid = {3126630},
doi = {10.1145/3126594.3126630},
isbn = {978-1-4503-4981-9/17/10},
location = {Quebec City, QC, Canada},
numpages = {12},
timestamp = {2016.10.05},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2017uist.pdf},
}

2015

F. Alt, A. Bulling, G. Gravanis, and D. Buschek, “GravitySpot: Guiding Users in Front of Public Displays Using On-Screen Visual Cues,” in Proceedings of the 28th Annual ACM Symposium on User Interface Software & Technology, New York, NY, USA, 2015, p. 47–56. doi:10.1145/2807442.2807490
[BibTeX] [Abstract] [Download PDF]
Users tend to position themselves in front of interactive public displays in such a way as to best perceive their content. Currently, this sweet spot is implicitly defined by display properties, content, the input modality, as well as space constraints in front of the display. We present GravitySpot – an approach that makes sweet spots flexible by actively guiding users to arbitrary target positions in front of displays using visual cues. Such guidance is beneficial, for example, if a particular input technology only works at a specific distance or if users should be guided towards a non-crowded area of a large display. In two controlled lab studies (n=29) we evaluate different visual cues based on color, shape, and motion, as well as position-to-cue mapping functions. We show that both the visual cues and mapping functions allow for fine-grained control over positioning speed and accuracy. Findings are complemented by observations from a 3-month real-world deployment.
@InProceedings{alt2015uist,
author = {Alt, Florian and Bulling, Andreas and Gravanis, Gino and Buschek, Daniel},
title = {GravitySpot: Guiding Users in Front of Public Displays Using On-Screen Visual Cues},
booktitle = {Proceedings of the 28th Annual ACM Symposium on User Interface Software \& Technology},
year = {2015},
series = {UIST '15},
pages = {47--56},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2015uist},
abstract = {Users tend to position themselves in front of interactive public displays in such a way as to best perceive their content. Currently, this sweet spot is implicitly defined by display properties, content, the input modality, as well as space constraints in front of the display. We present GravitySpot - an approach that makes sweet spots flexible by actively guiding users to arbitrary target positions in front of displays using visual cues. Such guidance is beneficial, for example, if a particular input technology only works at a specific distance or if users should be guided towards a non-crowded area of a large display. In two controlled lab studies (n=29) we evaluate different visual cues based on color, shape, and motion, as well as position-to-cue mapping functions. We show that both the visual cues and mapping functions allow for fine-grained control over positioning speed and accuracy. Findings are complemented by observations from a 3-month real-world deployment.},
acmid = {2807490},
doi = {10.1145/2807442.2807490},
isbn = {978-1-4503-3779-3},
keywords = {audience behavior, interaction, public displays, sweet spot},
location = {Charlotte, NC, USA},
numpages = {10},
timestamp = {2015.11.18},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2015uist.pdf},
}
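GravitySpot's guidance rests on position-to-cue mapping functions that translate the user's distance from the target position into the strength of a visual cue. A minimal sketch of two such mappings follows; the cue scale and range are illustrative, not the paper's implementation.

def linear_cue(distance, max_distance=3.0):
    """Cue intensity in [0, 1], growing linearly with distance from the sweet spot."""
    return min(distance / max_distance, 1.0)

def quadratic_cue(distance, max_distance=3.0):
    """Quadratic mapping: subtle near the target position, steep far away."""
    return min((distance / max_distance) ** 2, 1.0)

for d in (0.0, 0.5, 1.5, 3.0):
    print(f"d={d:.1f} m  linear={linear_cue(d):.2f}  quadratic={quadratic_cue(d):.2f}")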
F. Alt, S. Schneegass, A. S. Shirazi, M. Hassib, and A. Bulling, “Graphical Passwords in the Wild: Understanding How Users Choose Pictures and Passwords in Image-based Authentication Schemes,” in Proceedings of the 17th International Conference on Human-Computer Interaction with Mobile Devices and Services, New York, NY, USA, 2015, pp. 316–322. doi:10.1145/2785830.2785882
[BibTeX] [Abstract] [Download PDF]
Common user authentication methods on smartphones, such as lock patterns, PINs, or passwords, impose a trade-off between security and password memorability. Image-based passwords were proposed as a secure and usable alternative. As of today, however, it remains unclear how such schemes are used in the wild. We present the first study to investigate how image-based passwords are used over long periods of time in the real world. Our analyses are based on data from 2318 unique devices collected over more than one year using a custom application released in the Android Play store. We present an in-depth analysis of what kind of images users select, how they define their passwords, and how secure these passwords are. Our findings provide valuable insights into real-world use of image-based passwords and inform the design of future graphical authentication schemes.
@InProceedings{alt2015mobilehci,
author = {Alt, Florian and Schneegass, Stefan and Shirazi, Alireza Sahami and Hassib, Mariam and Bulling, Andreas},
title = {Graphical Passwords in the Wild: Understanding How Users Choose Pictures and Passwords in Image-based Authentication Schemes},
booktitle = {Proceedings of the 17th International Conference on Human-Computer Interaction with Mobile Devices and Services},
year = {2015},
series = {MobileHCI '15},
pages = {316--322},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2015mobilehci},
abstract = {Common user authentication methods on smartphones, such as lock patterns, PINs, or passwords, impose a trade-off between security and password memorability. Image-based passwords were proposed as a secure and usable alternative. As of today, however, it remains unclear how such schemes are used in the wild. We present the first study to investigate how image-based passwords are used over long periods of time in the real world. Our analyses are based on data from 2318 unique devices collected over more than one year using a custom application released in the Android Play store. We present an in-depth analysis of what kind of images users select, how they define their passwords, and how secure these passwords are. Our findings provide valuable insights into real-world use of image-based passwords and inform the design of future graphical authentication schemes.},
acmid = {2785882},
doi = {10.1145/2785830.2785882},
isbn = {978-1-4503-3652-9},
keywords = {Graphical passwords, images, security},
location = {Copenhagen, Denmark},
numpages = {7},
timestamp = {2015.08.24},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2015mobilehci.pdf},
}
N. Broy, M. Guo, S. Schneegass, B. Pfleging, and F. Alt, “Introducing Novel Technologies in the Car: Conducting a Real-world Study to Test 3D Dashboards,” in Proceedings of the 7th International Conference on Automotive User Interfaces and Interactive Vehicular Applications, New York, NY, USA, 2015, pp. 179–186. doi:10.1145/2799250.2799280
[BibTeX] [Abstract] [Download PDF]
Today, the vast majority of research on novel automotive user interface technologies is conducted in the lab, often using driving simulation. While such studies are important in early stages of the design process, we argue that ultimately studies need to be conducted in the real world in order to investigate all aspects crucial for adoption of novel user interface technologies in commercial vehicles. In this paper, we present a case study that investigates introducing autostereoscopic 3D dashboards into cars. We report on studying this novel technology in the real world, validating and extending findings of prior simulator studies. Furthermore, we provide guidelines for practitioners and researchers to design and conduct real-world studies that minimize the risk for participants while at the same time yielding ecologically valid findings.
@InProceedings{broy2015autoui,
author = {Broy, Nora and Guo, Mengbing and Schneegass, Stefan and Pfleging, Bastian and Alt, Florian},
title = {Introducing Novel Technologies in the Car: Conducting a Real-world Study to Test 3D Dashboards},
booktitle = {Proceedings of the 7th International Conference on Automotive User Interfaces and Interactive Vehicular Applications},
year = {2015},
series = {AutomotiveUI '15},
pages = {179--186},
address = {New York, NY, USA},
publisher = {ACM},
note = {broy2015autoui},
abstract = {Today, the vast majority of research on novel automotive user interface technologies is conducted in the lab, often using driving simulation. While such studies are important in early stages of the design process, we argue that ultimately studies need to be conducted in the real world in order to investigate all aspects crucial for adoption of novel user interface technologies in commercial vehicles. In this paper, we present a case study that investigates introducing autostereoscopic 3D dashboards into cars. We report on studying this novel technology in the real world, validating and extending findings of prior simulator studies. Furthermore, we provide guidelines for practitioners and researchers to design and conduct real-world studies that minimize the risk for participants while at the same time yielding ecologically valid findings.},
acmid = {2799280},
doi = {10.1145/2799250.2799280},
isbn = {978-1-4503-3736-6},
keywords = {automotive UIs, real world study, stereoscopic 3D},
location = {Nottingham, United Kingdom},
numpages = {8},
timestamp = {2015.09.14},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2015autoui.pdf},
}
N. Broy, M. Nefzger, F. Alt, M. Hassib, and A. Schmidt, “3D-HUDD – Developing a Prototyping Tool for 3D Head-Up Displays,” in Proceedings of the 15th IFIP TC13 International Conference on Human-Computer Interaction, New York, NY, USA, 2015.
[BibTeX] [Abstract] [Download PDF]
The ability of head-up displays (HUDs) to present information within the usual viewpoint of the user has led to a quick adoption in domains where attention is crucial, such as in the car. As HUDs employ 3D technology, further opportunities emerge: information can be structured and positioned in 3D space, thus allowing important information to be perceived more easily, and information can be registered with objects in the visual scene to communicate a relationship. This allows novel user interfaces to be built. As of today, however, no prototyping tools exist that allow 3D UIs for HUDs to be sketched and tested prior to development. To close this gap, we report on the design and development of the 3D Head-Up Display Designer (3D-HUDD). In addition, we present an evaluation of the tool with 24 participants, comparing different input modalities and depth management modes.
@InProceedings{broy2015interact,
author = {Nora Broy AND Matthias Nefzger AND Florian Alt AND Mariam Hassib AND Albrecht Schmidt},
title = {{3D-HUDD - Developing a Prototyping Tool for 3D Head-Up Displays}},
booktitle = {{Proceedings of the 15th IFIP TC13 International Conference on Human-Computer Interaction}},
year = {2015},
series = {INTERACT '15},
address = {New York, NY, USA},
publisher = {ACM},
note = {broy2015interact},
abstract = {The ability of head-up displays (HUDs) to present information within the usual viewpoint of the user has led to a quick adoption in domains where attention is crucial, such as in the car. As HUDs employ 3D technology, further opportunities emerge: information can be structured and positioned in 3D space, thus allowing important information to be perceived more easily, and information can be registered with objects in the visual scene to communicate a relationship. This allows novel user interfaces to be built. As of today, however, no prototyping tools exist that allow 3D UIs for HUDs to be sketched and tested prior to development. To close this gap, we report on the design and development of the 3D Head-Up Display Designer (3D-HUDD). In addition, we present an evaluation of the tool with 24 participants, comparing different input modalities and depth management modes.},
location = {Bamberg, Germany},
numpages = {6},
owner = {florianalt},
timestamp = {2015.09.14},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2015interact.pdf},
}
N. Broy, S. Schneegass, M. Guo, F. Alt, and A. Schmidt, “Evaluating Stereoscopic 3D for Automotive User Interfaces in a Real-World Driving Study,” in Proceedings of the 33rd Annual ACM Conference Extended Abstracts on Human Factors in Computing Systems, New York, NY, USA, 2015, pp. 1717–1722. doi:10.1145/2702613.2732902
[BibTeX] [Abstract] [Download PDF]
This paper reports on the use of in-car 3D displays in a real-world driving scenario. Today, stereoscopic displays are becoming ubiquitous in many domains such as mobile phones or TVs. Instead of using 3D for entertainment, we explore the 3D effect as a means to spatially structure user interface (UI) elements. To evaluate potentials and drawbacks of in-car 3D displays we mounted an autostereoscopic display as instrument cluster in a vehicle and conducted a real-world driving study with 15 experts in automotive UI design. The results show that the 3D effect increases the perceived quality of the UI and enhances the presentation of spatial information (e.g., navigation cues) compared to 2D. However, the effect should be applied judiciously to avoid spatial clutter, which can increase the system’s complexity.
@InProceedings{broy2015chiea,
author = {Broy, Nora and Schneegass, Stefan and Guo, Mengbing and Alt, Florian and Schmidt, Albrecht},
title = {Evaluating Stereoscopic 3D for Automotive User Interfaces in a Real-World Driving Study},
booktitle = {Proceedings of the 33rd Annual ACM Conference Extended Abstracts on Human Factors in Computing Systems},
year = {2015},
series = {CHI EA '15},
pages = {1717--1722},
address = {New York, NY, USA},
publisher = {ACM},
note = {broy2015chiea},
abstract = {This paper reports on the use of in-car 3D displays in a real-world driving scenario. Today, stereoscopic displays are becoming ubiquitous in many domains such as mobile phones or TVs. Instead of using 3D for entertainment, we explore the 3D effect as a means to spatially structure user interface (UI) elements. To evaluate potentials and drawbacks of in-car 3D displays we mounted an autostereoscopic display as instrument cluster in a vehicle and conducted a real-world driving study with 15 experts in automotive UI design. The results show that the 3D effect increases the perceived quality of the UI and enhances the presentation of spatial information (e.g., navigation cues) compared to 2D. However, the effect should be applied judiciously to avoid spatial clutter, which can increase the system's complexity.},
acmid = {2732902},
doi = {10.1145/2702613.2732902},
isbn = {978-1-4503-3146-3},
keywords = {automotive user interfaces, stereoscopic 3D},
location = {Seoul, Republic of Korea},
numpages = {6},
timestamp = {2015.04.27},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2015chiea.pdf},
}
D. Buschek and F. Alt, “TouchML: A Machine Learning Toolkit for Modelling Spatial Touch Targeting Behaviour,” in Proceedings of the 20th International Conference on Intelligent User Interfaces, New York, NY, USA, 2015, pp. 110–114. doi:10.1145/2678025.2701381
[BibTeX] [Abstract] [Download PDF]
Pointing tasks are commonly studied in HCI research, for example to evaluate and compare different interaction techniques or devices. A recent line of work has modelled user-specific touch behaviour with machine learning methods to reveal spatial targeting error patterns across the screen. These models can also be applied to improve accuracy of touchscreens and keyboards, and to recognise users and hand postures. However, no implementation of these techniques has been made publicly available yet, hindering broader use in research and practical deployments. Therefore, this paper presents a toolkit which implements such touch models for data analysis (Python), mobile applications (Java/Android), and the web (JavaScript). We demonstrate several applications, including hand posture recognition, on touch targeting data collected in a study with 24 participants. We consider different target types and hand postures, changing behaviour over time, and the influence of hand sizes.
@InProceedings{buschek2015iui,
author = {Buschek, Daniel and Alt, Florian},
title = {TouchML: A Machine Learning Toolkit for Modelling Spatial Touch Targeting Behaviour},
booktitle = {Proceedings of the 20th International Conference on Intelligent User Interfaces},
year = {2015},
series = {IUI '15},
pages = {110--114},
address = {New York, NY, USA},
publisher = {ACM},
note = {buschek2015iui},
abstract = {Pointing tasks are commonly studied in HCI research, for example to evaluate and compare different interaction techniques or devices. A recent line of work has modelled user-specific touch behaviour with machine learning methods to reveal spatial targeting error patterns across the screen. These models can also be applied to improve accuracy of touchscreens and keyboards, and to recognise users and hand postures. However, no implementation of these techniques has been made publicly available yet, hindering broader use in research and practical deployments. Therefore, this paper presents a toolkit which implements such touch models for data analysis (Python), mobile applications (Java/Android), and the web (JavaScript). We demonstrate several applications, including hand posture recognition, on touch targeting data collected in a study with 24 participants. We consider different target types and hand postures, changing behaviour over time, and the influence of hand sizes.},
acmid = {2701381},
doi = {10.1145/2678025.2701381},
isbn = {978-1-4503-3306-1},
keywords = {gaussian process, machine learning, toolkit, touch},
location = {Atlanta, Georgia, USA},
numpages = {5},
timestamp = {2015.03.29},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2015iui.pdf},
}
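TouchML models spatial touch targeting behaviour, for instance via Gaussian Process regression from intended target positions to observed offsets. The sketch below only illustrates that underlying idea on synthetic data, using scikit-learn as a stand-in; it is not TouchML's own API.

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

rng = np.random.default_rng(0)
targets = rng.uniform(0, 1, size=(200, 2))            # intended positions (normalised)
offsets = 0.03 * np.sin(3 * targets) + rng.normal(0, 0.005, targets.shape)
touches = targets + offsets                           # observed touch positions

# Learn target position -> expected offset; subtracting the predicted
# offset from future touches would "de-skew" them for this user.
model = GaussianProcessRegressor(kernel=RBF(0.3) + WhiteKernel(1e-4))
model.fit(targets, touches - targets)
print("predicted offset at screen centre:", model.predict(np.array([[0.5, 0.5]]))[0])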
D. Buschek, A. Auch, and F. Alt, “A Toolkit for Analysis and Prediction of Touch Targeting Behaviour on Mobile Websites,” in Proceedings of the 7th ACM SIGCHI Symposium on Engineering Interactive Computing Systems, New York, NY, USA, 2015, pp. 54–63. doi:10.1145/2774225.2774851
[BibTeX] [Abstract] [Download PDF]
Touch interaction on mobile devices suffers from several problems, such as the thumb’s limited reach or the occlusion of targets by the finger. This leads to offsets between the user’s intended touch location and the actual location sensed by the device. Recent research has modelled such offset patterns to analyse and predict touch targeting behaviour. However, these models have only been applied in lab experiments for specific tasks (typing, pointing, targeting games). In contrast, their applications to websites are yet unexplored. To close this gap, this paper explores the potential of touch modelling for the mobile web: We present a toolkit which allows web developers to collect and analyse touch interactions with their websites. Our system can learn about users’ targeting patterns to simulate expected touch interactions and help identify potential usability issues for future versions of the website prior to deployment. We train models on data collected in a field experiment with 50 participants in a shopping scenario. Our analyses show that the resulting models capture interesting behavioural patterns, reveal insights into user-specific behaviour, and enable predictions of expected error rates for individual interface elements.
@InProceedings{buschek2015eics,
author = {Buschek, Daniel and Auch, Alexander and Alt, Florian},
title = {A Toolkit for Analysis and Prediction of Touch Targeting Behaviour on Mobile Websites},
booktitle = {Proceedings of the 7th ACM SIGCHI Symposium on Engineering Interactive Computing Systems},
year = {2015},
series = {EICS '15},
pages = {54--63},
address = {New York, NY, USA},
publisher = {ACM},
note = {buschek2015eics},
abstract = {Touch interaction on mobile devices suffers from several problems, such as the thumb's limited reach or the occlusion of targets by the finger. This leads to offsets between the user's intended touch location and the actual location sensed by the device. Recent research has modelled such offset patterns to analyse and predict touch targeting behaviour. However, these models have only been applied in lab experiments for specific tasks (typing, pointing, targeting games). In contrast, their applications to websites are yet unexplored. To close this gap, this paper explores the potential of touch modelling for the mobile web: We present a toolkit which allows web developers to collect and analyse touch interactions with their websites. Our system can learn about users' targeting patterns to simulate expected touch interactions and help identify potential usability issues for future versions of the website prior to deployment. We train models on data collected in a field experiment with 50 participants in a shopping scenario. Our analyses show that the resulting models capture interesting behavioural patterns, reveal insights into user-specific behaviour, and enable predictions of expected error rates for individual interface elements.},
acmid = {2774851},
doi = {10.1145/2774225.2774851},
isbn = {978-1-4503-3646-8},
keywords = {mobile, targeting, toolkit, touch, user model, web},
location = {Duisburg, Germany},
numpages = {10},
timestamp = {2015.06.23},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2015eics.pdf},
}
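One output of such a toolkit is the predicted error rate of an individual interface element. A back-of-the-envelope sketch of that kind of prediction, assuming (our assumption, for illustration only) Gaussian-distributed touch offsets around a button centre:

from statistics import NormalDist

def miss_probability(mu, sigma, half_width):
    """P(touch lands outside +/- half_width of the centre) on one axis,
    given offset mean mu and standard deviation sigma (all in px)."""
    axis = NormalDist(mu, sigma)
    return 1.0 - (axis.cdf(half_width) - axis.cdf(-half_width))

# Example: a 48 px wide button, user tends to touch 4 px right of centre.
print(f"expected horizontal miss rate: {miss_probability(4.0, 10.0, 24.0):.1%}")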
D. Buschek, A. De Luca, and F. Alt, “Improving Accuracy, Applicability and Usability of Keystroke Biometrics on Mobile Touchscreen Devices,” in Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems, New York, NY, USA, 2015, pp. 1393–1402. doi:10.1145/2702123.2702252
[BibTeX] [Abstract] [Download PDF]
Authentication methods can be improved by considering implicit, individual behavioural cues. In particular, verifying users based on typing behaviour has been widely studied with physical keyboards. On mobile touchscreens, the same concepts have been applied with little adaptation so far. This paper presents the first reported study on mobile keystroke biometrics which compares touch-specific features between three different hand postures and evaluation schemes. Based on 20,160 password entries from a study with 28 participants over two weeks, we show that including spatial touch features reduces implicit authentication equal error rates (EER) by 26.4–36.8% relative to the previously used temporal features. We also show that authentication works better for some hand postures than others. To improve applicability and usability, we further quantify the influence of common evaluation assumptions: known attacker data, training and testing on data from a single typing session, and fixed hand postures. We show that these practices can lead to overly optimistic evaluations. In consequence, we describe evaluation recommendations, a probabilistic framework to handle unknown hand postures, and ideas for further improvements.
@InProceedings{buschek2015chi,
author = {Buschek, Daniel and De Luca, Alexander and Alt, Florian},
title = {Improving Accuracy, Applicability and Usability of Keystroke Biometrics on Mobile Touchscreen Devices},
booktitle = {Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems},
year = {2015},
series = {CHI '15},
pages = {1393--1402},
address = {New York, NY, USA},
publisher = {ACM},
note = {buschek2015chi},
abstract = {Authentication methods can be improved by considering implicit, individual behavioural cues. In particular, verifying users based on typing behaviour has been widely studied with physical keyboards. On mobile touchscreens, the same concepts have been applied with little adaptation so far. This paper presents the first reported study on mobile keystroke biometrics which compares touch-specific features between three different hand postures and evaluation schemes. Based on 20,160 password entries from a study with 28 participants over two weeks, we show that including spatial touch features reduces implicit authentication equal error rates (EER) by 26.4-36.8% relative to the previously used temporal features. We also show that authentication works better for some hand postures than others. To improve applicability and usability, we further quantify the influence of common evaluation assumptions: known attacker data, training and testing on data from a single typing session, and fixed hand postures. We show that these practices can lead to overly optimistic evaluations. In consequence, we describe evaluation recommendations, a probabilistic framework to handle unknown hand postures, and ideas for further improvements.},
acmid = {2702252},
doi = {10.1145/2702123.2702252},
isbn = {978-1-4503-3145-6},
keywords = {biometrics, keystroke dynamics, mobile, touch},
location = {Seoul, Republic of Korea},
numpages = {10},
timestamp = {2015.04.28},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2015chi.pdf},
}
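The paper reports verification performance as equal error rates (EER), the operating point where false accepts and false rejects balance. For readers unfamiliar with the metric, a small sketch of how an EER is computed from genuine and impostor scores (the scores below are synthetic, not study data):

import numpy as np

def equal_error_rate(genuine, impostor):
    """Sweep a decision threshold; the EER is (approximately) where the
    false-accept rate (FAR) meets the false-reject rate (FRR)."""
    lo = min(genuine.min(), impostor.min())
    hi = max(genuine.max(), impostor.max())
    thresholds = np.linspace(lo, hi, 1000)
    far = np.array([(impostor >= t).mean() for t in thresholds])
    frr = np.array([(genuine < t).mean() for t in thresholds])
    i = np.argmin(np.abs(far - frr))
    return (far[i] + frr[i]) / 2

rng = np.random.default_rng(1)
genuine = rng.normal(1.0, 0.3, 500)   # higher score = more similar to enrolled user
impostor = rng.normal(0.0, 0.3, 500)
print(f"EER ~ {equal_error_rate(genuine, impostor):.1%}")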
D. Buschek, A. De Luca, and F. Alt, “There is More to Typing Than Speed: Expressive Mobile Touch Keyboards via Dynamic Font Personalisation,” in Proceedings of the 17th International Conference on Human-Computer Interaction with Mobile Devices and Services, New York, NY, USA, 2015, pp. 125–130. doi:10.1145/2785830.2785844
[BibTeX] [Abstract] [Download PDF]
Typing is a common task on mobile devices and has been widely addressed in HCI research, mostly regarding quantitative factors such as error rates and speed. Qualitative aspects, like personal expressiveness, have received less attention. This paper makes individual typing behaviour visible to the users to render mobile typing more personal and expressive in varying contexts: We introduce a dynamic font personalisation framework, TapScript, which adapts a finger-drawn font according to user behaviour and context, such as finger placement, device orientation and movements – resulting in a handwritten-looking font. We implemented TapScript for evaluation with an online survey (N=91) and a field study with a chat app (N=11). Looking at resulting fonts, survey participants distinguished pairs of typists with 84.5% accuracy and walking/sitting with 94.8%. Study participants perceived fonts as individual and the chat experience as personal. They also made creative explicit use of font adaptations.
@InProceedings{buschek2015mobilehci,
author = {Buschek, Daniel and De Luca, Alexander and Alt, Florian},
title = {There is More to Typing Than Speed: Expressive Mobile Touch Keyboards via Dynamic Font Personalisation},
booktitle = {Proceedings of the 17th International Conference on Human-Computer Interaction with Mobile Devices and Services},
year = {2015},
series = {MobileHCI '15},
pages = {125--130},
address = {New York, NY, USA},
publisher = {ACM},
note = {buschek2015mobilehci},
abstract = {Typing is a common task on mobile devices and has been widely addressed in HCI research, mostly regarding quantitative factors such as error rates and speed. Qualitative aspects, like personal expressiveness, have received less attention. This paper makes individual typing behaviour visible to the users to render mobile typing more personal and expressive in varying contexts: We introduce a dynamic font personalisation framework, TapScript, which adapts a finger-drawn font according to user behaviour and context, such as finger placement, device orientation and movements - resulting in a handwritten-looking font. We implemented TapScript for evaluation with an online survey (N=91) and a field study with a chat app (N=11). Looking at resulting fonts, survey participants distinguished pairs of typists with 84.5% accuracy and walking/sitting with 94.8%. Study participants perceived fonts as individual and the chat experience as personal. They also made creative explicit use of font adaptations.},
acmid = {2785844},
doi = {10.1145/2785830.2785844},
isbn = {978-1-4503-3652-9},
keywords = {Font Personalisation, Mobile, Touch Typing},
location = {Copenhagen, Denmark},
numpages = {6},
timestamp = {2015.08.23},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2015mobilehci.pdf},
}
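TapScript adapts a finger-drawn base font to behaviour and context such as device orientation and movement. A toy sketch of what such an adaptation step on a polyline glyph could look like; the glyph, weights, and sensor values are invented for illustration and do not reflect TapScript's implementation:

import math, random

def adapt_glyph(points, movement_level, tilt_deg, jitter_gain=1.5):
    """Distort a finger-drawn glyph (list of (x, y) control points):
    device tilt shears it, movement (0..1, e.g. from accelerometer
    variance) adds handwriting-like jitter."""
    slant = math.tan(math.radians(tilt_deg))
    jitter = jitter_gain * movement_level
    return [(x + slant * y + random.uniform(-jitter, jitter),
             y + random.uniform(-jitter, jitter)) for x, y in points]

glyph = [(0, 0), (2, 8), (4, 0), (1, 4), (3, 4)]      # crude letter "A"
print(adapt_glyph(glyph, movement_level=0.8, tilt_deg=10))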
D. Buschek, I. Just, B. Fritzsche, and F. Alt, “Make Me Laugh: A Recommendation System for Humoristic Content on the World Wide Web,” in Proceedings of Mensch und Computer 2015, 2015.
[BibTeX] [Abstract] [Download PDF]
Humoristic content is an inherent part of the World Wide Web and increasingly consumed for micro-entertainment. However, humor is often highly individual and depends on background knowledge and context. This paper presents an approach to recommend humoristic content fitting each individual user’s taste and interests. In a field study with 150 participants over four weeks, users rated content with a 0-10 scale on a humor website. Based on this data, we train and apply a Collaborative Filtering (CF) algorithm to assess individual humor and recommend fitting content. Our study shows that users rate recommended content 22.6% higher than randomly chosen content.
@InProceedings{buschek2015muc,
author = {Buschek, Daniel and Just, Ingo and Fritzsche, Benjamin and Alt, Florian},
title = {{Make Me Laugh: A Recommendation System for Humoristic Content on the World Wide Web}},
booktitle = {{Proceedings of Mensch und Computer 2015}},
year = {2015},
note = {buschek2015muc},
abstract = {Humoristic content is an inherent part of the World Wide Web and increasingly consumed for micro-entertainment. However, humor is often highly individual and depends on background knowledge and context. This paper presents an approach to recommend humoristic content fitting each individual user's taste and interests. In a field study with 150 participants over four weeks, users rated content with a 0-10 scale on a humor website. Based on this data, we train and apply a Collaborative Filtering (CF) algorithm to assess individual humor and recommend fitting content. Our study shows that users rate recommended content 22.6% higher than randomly chosen content.},
location = {Stuttgart, Germany},
numpages = {10},
owner = {florian},
timestamp = {2015.09.06},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2015muc.pdf},
}
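The recommender rests on Collaborative Filtering over 0-10 humor ratings. A compact user-based CF sketch of that idea (cosine similarity over mean-centred ratings); the rating matrix is synthetic, not the study's data:

import numpy as np

R = np.array([[8, 2, np.nan, 7],
              [7, 1, 6,      np.nan],
              [1, 9, 2,      3]], dtype=float)        # users x items, 0-10, NaN = unrated

def predict(R, user, item):
    means = np.nanmean(R, axis=1)
    centred = R - means[:, None]
    num = den = 0.0
    for v in range(R.shape[0]):
        if v == user or np.isnan(R[v, item]):
            continue
        both = ~np.isnan(centred[user]) & ~np.isnan(centred[v])
        a, b = centred[user][both], centred[v][both]
        if a.size == 0:
            continue
        sim = a @ b / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-9)  # cosine similarity
        num += sim * centred[v, item]
        den += abs(sim)
    return means[user] + (num / den if den else 0.0)

print(f"predicted rating of user 0 for item 2: {predict(R, 0, 2):.1f}")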
D. Buschek, M. Spitzer, and F. Alt, “Video-Recording Your Life: User Perception and Experiences,” in Proceedings of the 33rd Annual ACM Conference Extended Abstracts on Human Factors in Computing Systems, New York, NY, USA, 2015, pp. 2223–2228. doi:10.1145/2702613.2732743
[BibTeX] [Abstract] [Download PDF]
Video recording is becoming an integral part of our daily activities: Action cams and wearable cameras allow us to capture scenes of our daily life effortlessly. This trend generates vast amounts of video material impossible to review manually. However, these recordings also contain a lot of information potentially interesting to the recording individual and to others. Such videos can provide a meaningful summary of the day, serving as a digital extension to the user’s human memory. They might also be interesting to others as tutorials (e.g. how to change a flat tyre). As a first step towards this vision, we present a survey assessing the users’ view and their video recording behavior. Findings were used to inform the design of a prototype based on off-the-shelf components, which allows users to create meaningful video clips of their daily activities in an automated manner by using their phone and any wearable camera. We conclude with a preliminary, qualitative study showing the feasibility and potential of the approach and sketch future research directions.
@InProceedings{buschek2015chiea,
author = {Buschek, Daniel and Spitzer, Michael and Alt, Florian},
title = {Video-Recording Your Life: User Perception and Experiences},
booktitle = {Proceedings of the 33rd Annual ACM Conference Extended Abstracts on Human Factors in Computing Systems},
year = {2015},
series = {CHI EA '15},
pages = {2223--2228},
address = {New York, NY, USA},
publisher = {ACM},
note = {buschek2015chiea},
abstract = {Video recording is becoming an integral part of our daily activities: Action cams and wearable cameras allow us to capture scenes of our daily life effortlessly. This trend generates vast amounts of video material impossible to review manually. However, these recordings also contain a lot of information potentially interesting to the recording individual and to others. Such videos can provide a meaningful summary of the day, serving as a digital extension to the user's human memory. They might also be interesting to others as tutorials (e.g. how to change a flat tyre). As a first step towards this vision, we present a survey assessing the users' view and their video recording behavior. Findings were used to inform the design of a prototype based on off-the-shelf components, which allows users to create meaningful video clips of their daily activities in an automated manner by using their phone and any wearable camera. We conclude with a preliminary, qualitative study showing the feasibility and potential of the approach and sketch future research directions.},
acmid = {2732743},
doi = {10.1145/2702613.2732743},
isbn = {978-1-4503-3146-3},
keywords = {context, life logging, smartphone, video recording},
location = {Seoul, Republic of Korea},
numpages = {6},
timestamp = {2015.04.27},
url = {http://www.florian-alt.org/unibw/wp-content/publications/buschek2015chiea.pdf},
}
A. Colley, L. Ventä-Olkkonen, F. Alt, and J. Häkkilä, “Insights from Deploying See-Through Augmented Reality Signage in the Wild,” in Proceedings of the 4th International Symposium on Pervasive Displays, New York, NY, USA, 2015, pp. 179–185. doi:10.1145/2757710.2757730
[BibTeX] [Abstract] [Download PDF]
Typically, the key challenges with interactive digital signage are (1) interaction times are short (usually in the order of seconds), (2) interaction needs to be very easy to understand, and (3) interaction needs to provide a benefit that justifies the effort to engage. To tackle these challenges, we propose a see-through augmented reality application for digital signage that enables passersby to observe the area behind the display, augmented with useful data. We report on the development and deployment of our application in two public settings: a public library and a supermarket. Based on observations of 261 (library) and 661 (supermarket) passersby and 14 interviews, we provide early insights and implications for application designers. Our results show a significant increase in attention: the see-through signage was noticed by 46% of the people, compared to 14% with the non-see-through version. Furthermore, findings indicate that to best benefit the passersby, the AR displays should clearly communicate their purpose.
@InProceedings{colley2015perdis,
author = {Colley, Ashley and Vent\"{a}-Olkkonen, Leena and Alt, Florian and H\"{a}kkil\"{a}, Jonna},
title = {Insights from Deploying See-Through Augmented Reality Signage in the Wild},
booktitle = {Proceedings of the 4th International Symposium on Pervasive Displays},
year = {2015},
series = {PerDis '15},
pages = {179--185},
address = {New York, NY, USA},
publisher = {ACM},
note = {colley2015perdis},
abstract = {Typically, the key challenges with interactive digital signage are (1) interaction times are short (usually in the order of seconds), (2) interaction needs to be very easy to understand, and (3) interaction needs to provide a benefit that justifies the effort to engage. To tackle these challenges, we propose a see-through augmented reality application for digital signage that enables passersby to observe the area behind the display, augmented with useful data. We report on the development and deployment of our application in two public settings: a public library and a supermarket. Based on observations of 261 (library) and 661 (supermarket) passersby and 14 interviews, we provide early insights and implications for application designers. Our results show a significant increase in attention: the see-through signage was noticed by 46% of the people, compared to 14% with the non-see-through version. Furthermore, findings indicate that to best benefit the passersby, the AR displays should clearly communicate their purpose.},
acmid = {2757730},
doi = {10.1145/2757710.2757730},
isbn = {978-1-4503-3608-6},
keywords = {AR, attention, digital signage, interaction, public displays},
location = {Saarbruecken, Germany},
numpages = {7},
timestamp = {2015.06.08},
url = {http://www.florian-alt.org/unibw/wp-content/publications/colley2015perdis.pdf},
}
T. Dingler, M. Funk, and F. Alt, “Interaction Proxemics: Combining Physical Spaces for Seamless Gesture Interaction,” in Proceedings of the 4th International Symposium on Pervasive Displays, New York, NY, USA, 2015, pp. 107–114. doi:10.1145/2757710.2757722
[BibTeX] [Abstract] [Download PDF]
Touch and gesture input have become popular for display interaction. While applications usually focus on one particular input technology, we set out to adjust the interaction modality based on the proximity of users to the screen. Therefore, we built a system which combines technology-transparent interaction spaces across 4 interaction zones: touch, fine-grained, general, and coarse gestures. In a user study, participants performed a pointing task within and across these zones. Results show that zone transitions are most feasible up to 2m from the screen. Hence, applications can map functionality across different interaction zones, thereby providing additional interaction dimensions and decreasing the complexity of the gesture set. We collected subjective feedback and present a user-defined gesture set for performing a series of standard tasks across different interaction zones. Seamless transition between these spaces is essential to create a consistent interaction experience; finally, we discuss characteristics of systems that take into account user proxemics as input modality.
@InProceedings{dingler2015perdis,
author = {Dingler, Tilman and Funk, Markus and Alt, Florian},
title = {Interaction Proxemics: Combining Physical Spaces for Seamless Gesture Interaction},
booktitle = {Proceedings of the 4th International Symposium on Pervasive Displays},
year = {2015},
series = {PerDis '15},
pages = {107--114},
address = {New York, NY, USA},
publisher = {ACM},
note = {dingler2015perdis},
abstract = {Touch and gesture input have become popular for display interaction. While applications usually focus on one particular input technology, we set out to adjust the interaction modality based on the proximity of users to the screen. Therefore, we built a system which combines technology-transparent interaction spaces across 4 interaction zones: touch, fine-grained, general, and coarse gestures. In a user study, participants performed a pointing task within and across these zones. Results show that zone transitions are most feasible up to 2m from the screen. Hence, applications can map functionality across different interaction zones, thereby providing additional interaction dimensions and decreasing the complexity of the gesture set. We collected subjective feedback and present a user-defined gesture set for performing a series of standard tasks across different interaction zones. Seamless transition between these spaces is essential to create a consistent interaction experience; finally, we discuss characteristics of systems that take into account user proxemics as input modality.},
acmid = {2757722},
doi = {10.1145/2757710.2757722},
isbn = {978-1-4503-3608-6},
keywords = {Interaction, distance, gestures, proxemics},
location = {Saarbruecken, Germany},
numpages = {8},
timestamp = {2015.06.08},
url = {http://www.florian-alt.org/unibw/wp-content/publications/dingler2015perdis.pdf},
}
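The system's proxemic switching boils down to classifying the user's distance into one of the four interaction zones named in the paper. A minimal sketch; the zone boundaries below are illustrative, as the paper only reports that zone transitions are most feasible up to about 2 m from the screen:

ZONES = [(0.6, "touch"), (1.2, "fine-grained gestures"),
         (2.0, "general gestures"), (float("inf"), "coarse gestures")]

def interaction_zone(distance_m):
    """Return the interaction zone for a user at the given distance."""
    for boundary, zone in ZONES:
        if distance_m <= boundary:
            return zone

for d in (0.3, 1.0, 1.8, 3.5):
    print(f"{d:.1f} m -> {interaction_zone(d)}")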
M. Khamis, F. Alt, and A. Bulling, “A Field Study on Spontaneous Gaze-based Interaction with a Public Display Using Pursuits,” in Adjunct Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing and Proceedings of the 2015 ACM International Symposium on Wearable Computers, New York, NY, USA, 2015, pp. 863–872. doi:10.1145/2800835.2804335
[BibTeX] [Abstract] [Download PDF]
Smooth pursuit eye movements were recently introduced as a promising technique for calibration-free and thus spontaneous and natural gaze interaction. While pursuits have been evaluated in controlled laboratory studies, the technique has not yet been evaluated with respect to usability in the wild. We report on a field study in which we deployed a game on a public display where participants used pursuits to select fish moving in linear and circular trajectories at different speeds. The study ran for two days in a busy computer lab resulting in a total of 56 interactions. Results from our study show that linear trajectories are statistically faster to select via pursuits than circular trajectories. We also found that pursuits is well perceived by users who find it fast and responsive.
@InProceedings{khamis2015petmei,
author = {Khamis, Mohamed and Alt, Florian and Bulling, Andreas},
title = {A Field Study on Spontaneous Gaze-based Interaction with a Public Display Using Pursuits},
booktitle = {Adjunct Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing and Proceedings of the 2015 ACM International Symposium on Wearable Computers},
year = {2015},
series = {UbiComp/ISWC'15 Adjunct},
pages = {863--872},
address = {New York, NY, USA},
publisher = {ACM},
note = {khamis2015petmei},
abstract = {Smooth pursuit eye movements were recently introduced as a promising technique for calibration-free and thus spontaneous and natural gaze interaction. While pursuits have been evaluated in controlled laboratory studies, the technique has not yet been evaluated with respect to usability in the wild. We report on a field study in which we deployed a game on a public display where participants used pursuits to select fish moving in linear and circular trajectories at different speeds. The study ran for two days in a busy computer lab resulting in a total of 56 interactions. Results from our study show that linear trajectories are statistically faster to select via pursuits than circular trajectories. We also found that pursuits is well perceived by users who find it fast and responsive.},
acmid = {2804335},
doi = {10.1145/2800835.2804335},
isbn = {978-1-4503-3575-1},
keywords = {field study, pervasive displays, public displays, pursuits, smooth pursuit eye movement},
location = {Osaka, Japan},
numpages = {10},
timestamp = {2015.09.08},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2015petmei.pdf},
}
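Pursuits selects a moving target by correlating recent gaze samples with each target's trajectory. A minimal sketch of that matching step; the window length and correlation threshold are illustrative, not the study's parameters:

import numpy as np

def pursuit_match(gaze, targets, threshold=0.8):
    """gaze: (n, 2) recent gaze samples; targets: name -> (n, 2) target
    positions over the same window. Returns the best-matching target
    name, or None if no correlation exceeds the threshold."""
    best, best_corr = None, threshold
    for name, traj in targets.items():
        cx = np.corrcoef(gaze[:, 0], traj[:, 0])[0, 1]
        cy = np.corrcoef(gaze[:, 1], traj[:, 1])[0, 1]
        corr = min(cx, cy)            # gaze must follow the target on both axes
        if corr > best_corr:
            best, best_corr = name, corr
    return best

t = np.linspace(0, 2 * np.pi, 60)
linear_fish = np.column_stack([t, 0.5 * t])
circular_fish = np.column_stack([np.cos(t), np.sin(t)])
gaze = circular_fish + np.random.default_rng(2).normal(0, 0.05, circular_fish.shape)
print(pursuit_match(gaze, {"linear": linear_fish, "circular": circular_fish}))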
M. Khamis, A. Bulling, and F. Alt, “Tackling Challenges of Interactive Public Displays Using Gaze,” in Adjunct Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing and Proceedings of the 2015 ACM International Symposium on Wearable Computers, New York, NY, USA, 2015, pp. 763–766. doi:10.1145/2800835.2807951
[BibTeX] [Abstract] [Download PDF]
Falling hardware prices led to a widespread use of public displays. Common interaction techniques for such displays currently include touch, mid-air, or smartphone-based interaction. While these techniques are well understood from a technical perspective, several remaining challenges hinder the uptake of interactive displays among passersby. In this paper we propose addressing major public display challenges through gaze as a novel interaction modality. We discuss why gaze-based interaction can tackle these challenges effectively and discuss how solutions can be technically realized. Furthermore, we summarize state-of-the-art eye tracking techniques that show particular promise in the area of public displays.
@InProceedings{khamis2015pdapps,
author = {Khamis, Mohamed and Bulling, Andreas and Alt, Florian},
title = {Tackling Challenges of Interactive Public Displays Using Gaze},
booktitle = {Adjunct Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing and Proceedings of the 2015 ACM International Symposium on Wearable Computers},
year = {2015},
series = {UbiComp/ISWC'15 Adjunct},
pages = {763--766},
address = {New York, NY, USA},
publisher = {ACM},
note = {khamis2015pdapps},
abstract = {Falling hardware prices led to a widespread use of public displays. Common interaction techniques for such displays currently include touch, mid-air, or smartphone-based interaction. While these techniques are well understood from a technical perspective, several remaining challenges hinder the uptake of interactive displays among passersby. In this paper we propose addressing major public display challenges through gaze as a novel interaction modality. We discuss why gaze-based interaction can tackle these challenges effectively and discuss how solutions can be technically realized. Furthermore, we summarize state-of-the-art eye tracking techniques that show particular promise in the area of public displays.},
acmid = {2807951},
doi = {10.1145/2800835.2807951},
isbn = {978-1-4503-3575-1},
keywords = {digital signage, gaze, gaze-based interaction, pervasive displays, public displays},
location = {Osaka, Japan},
numpages = {4},
timestamp = {2015.09.08},
url = {http://www.florian-alt.org/unibw/wp-content/publications/khamis2015pdapps.pdf},
}
N. Memarovic, S. Clinch, and F. Alt, “Understanding Display Blindness in Future Display Deployments,” in Proceedings of the 4th International Symposium on Pervasive Displays, New York, NY, USA, 2015, pp. 7–14. doi:10.1145/2757710.2757719
[BibTeX] [Abstract] [Download PDF]
Digital displays are heralded as a transformative medium for communication. However, a known challenge in the domain is that of display blindness in which passersby pay little or no attention to public displays. This phenomenon has been a major motivation for much of the research on public displays. However, since the early observations, little has been done to develop our understanding of display blindness – for example, to identify determining factors or propose appropriate metrics. Hence, the degree to which developments in signage form, content, and interaction address display blindness remains unclear. In this paper we examine and categorize current approaches to studying and addressing display blindness. Based on our analysis we identify open questions in the research space, including the impact of display physicality and audience differences, relationships with other observed effects, the impact of research interventions, and selection of appropriate metrics. The goal of this paper is to start a discussion within the community on the topic, and to inform the design of future research.
@InProceedings{memarovic2015perdis,
author = {Memarovic, Nemanja and Clinch, Sarah and Alt, Florian},
title = {Understanding Display Blindness in Future Display Deployments},
booktitle = {Proceedings of the 4th International Symposium on Pervasive Displays},
year = {2015},
series = {PerDis '15},
pages = {7--14},
address = {New York, NY, USA},
publisher = {ACM},
note = {memarovic2015perdis},
abstract = {Digital displays are heralded as a transformative medium for communication. However, a known challenge in the domain is that of display blindness in which passersby pay little or no attention to public displays. This phenomenon has been a major motivation for much of the research on public displays. However, since the early observations, little has been done to develop our understanding of display blindness -- for example, to identify determining factors or propose appropriate metrics. Hence, the degree to which developments in signage form, content, and interaction address display blindness remains unclear. In this paper we examine and categorize current approaches to studying and addressing display blindness. Based on our analysis we identify open questions in the research space, including the impact of display physicality and audience differences, relationships with other observed effects, the impact of research interventions, and selection of appropriate metrics. The goal of this paper is to start a discussion within the community on the topic, and to inform the design of future research.},
acmid = {2757719},
doi = {10.1145/2757710.2757719},
isbn = {978-1-4503-3608-6},
keywords = {Display blindness, interaction blindness, public displays},
location = {Saarbruecken, Germany},
numpages = {8},
timestamp = {2015.06.08},
url = {http://www.florian-alt.org/unibw/wp-content/publications/memarovic2015perdis.pdf},
}
P. Panhey, T. Döring, S. Schneegass, D. Wenig, and F. Alt, “What People Really Remember: Understanding Cognitive Effects When Interacting with Large Displays,” in Proceedings of the 2015 International Conference on Interactive Tabletops & Surfaces, New York, NY, USA, 2015, pp. 103–106. doi:10.1145/2817721.2817732
[BibTeX] [Abstract] [Download PDF]
This paper investigates how common interaction techniques for large displays impact on recall in learning tasks. Our work is motivated by results of prior research in different areas that attribute a positive effect of interactivity to cognition. We present findings from a controlled lab experiment with 32 participants comparing mobile phone-based interaction, touch interaction and full-body interaction to a non-interactive baseline. In contrast to prior findings, our results reveal that more movement can negatively influence recall. In particular we show that designers are facing an immanent trade-off between designing engaging interaction through extensive movement and creating memorable content.
@InProceedings{panhey2015its,
author = {Panhey, Philipp and D\"{o}ring, Tanja and Schneegass, Stefan and Wenig, Dirk and Alt, Florian},
title = {What People Really Remember: Understanding Cognitive Effects When Interacting with Large Displays},
booktitle = {Proceedings of the 2015 International Conference on Interactive Tabletops \& Surfaces},
year = {2015},
series = {ITS '15},
pages = {103--106},
address = {New York, NY, USA},
publisher = {ACM},
note = {panhey2015its},
abstract = {This paper investigates how common interaction techniques for large displays impact on recall in learning tasks. Our work is motivated by results of prior research in different areas that attribute a positive effect of interactivity to cognition. We present findings from a controlled lab experiment with 32 participants comparing mobile phone-based interaction, touch interaction and full-body interaction to a non-interactive baseline. In contrast to prior findings, our results reveal that more movement can negatively influence recall. In particular we show that designers are facing an immanent trade-off between designing engaging interaction through extensive movement and creating memorable content.},
acmid = {2817732},
doi = {10.1145/2817721.2817732},
isbn = {978-1-4503-3899-8},
keywords = {cognition, interactivity, pervasive displays, recall},
location = {Madeira, Portugal},
numpages = {4},
timestamp = {2015.11.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/panhey2015its.pdf},
}
M. Pfeiffer, T. Dünte, S. Schneegass, F. Alt, and M. Rohs, “Cruise Control for Pedestrians: Controlling Walking Direction Using Electrical Muscle Stimulation,” in Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems, New York, NY, USA, 2015, pp. 2505–2514. doi:10.1145/2702123.2702190
[BibTeX] [Abstract] [Download PDF]
Pedestrian navigation systems require users to perceive, interpret, and react to navigation information. This can tax cognition as navigation information competes with information from the real world. We propose actuated navigation, a new kind of pedestrian navigation in which the user does not need to attend to the navigation task at all. An actuation signal is directly sent to the human motor system to influence walking direction. To achieve this goal we stimulate the sartorius muscle using electrical muscle stimulation. The rotation occurs during the swing phase of the leg and can easily be counteracted. The user therefore stays in control. We discuss the properties of actuated navigation and present a lab study on identifying basic parameters of the technique as well as an outdoor study in a park. The results show that our approach changes a user’s walking direction by about 16°/m on average and that the system can successfully steer users in a park with crowded areas, distractions, obstacles, and uneven ground.
@InProceedings{pfeiffer2015chi,
author = {Pfeiffer, Max and D\"{u}nte, Tim and Schneegass, Stefan and Alt, Florian and Rohs, Michael},
title = {Cruise Control for Pedestrians: Controlling Walking Direction Using Electrical Muscle Stimulation},
booktitle = {Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems},
year = {2015},
series = {CHI '15},
pages = {2505--2514},
address = {New York, NY, USA},
publisher = {ACM},
note = {pfeiffer2015chi},
abstract = {Pedestrian navigation systems require users to perceive, interpret, and react to navigation information. This can tax cognition as navigation information competes with information from the real world. We propose actuated navigation, a new kind of pedestrian navigation in which the user does not need to attend to the navigation task at all. An actuation signal is directly sent to the human motor system to influence walking direction. To achieve this goal we stimulate the sartorius muscle using electrical muscle stimulation. The rotation occurs during the swing phase of the leg and can easily be counteracted. The user therefore stays in control. We discuss the properties of actuated navigation and present a lab study on identifying basic parameters of the technique as well as an outdoor study in a park. The results show that our approach changes a user's walking direction by about 16°/m on average and that the system can successfully steer users in a park with crowded areas, distractions, obstacles, and uneven ground.},
acmid = {2702190},
doi = {10.1145/2702123.2702190},
isbn = {978-1-4503-3145-6},
keywords = {actuated navigation, electrical muscle stimulation, haptic feedback, pedestrian navigation, wearable devices},
location = {Seoul, Republic of Korea},
numpages = {10},
timestamp = {2015.04.28},
url = {http://www.florian-alt.org/unibw/wp-content/publications/pfeiffer2015chi.pdf},
}

2014

F. Alt, N. Memarovic, M. Greis, and N. Henze, “UniDisplay – A Research Prototype to Investigate Expectations Towards Public Display Applications,” in Proceedings of the 1st Workshop on Developing Applications for Pervasive Display Networks, 2014.
[BibTeX] [Abstract] [Download PDF]
As public display networks become open, novel types of interaction applications emerge. In particular, we expect applications that support user-generated content to rapidly gain importance, since they provide a tangible benefit for the user in the form of digital bulletin boards, discussion platforms that foster public engagement, and applications that allow for self-expression. At the same time, such applications infer several challenges: first, they need to provide suitable means for the passerby to contribute content to the application; second, mechanisms need to be employed that provide sufficient control for the display owner with regard to content moderation; and third, the users’ expectations with regard to the posting procedure need to be well understood. In this paper we present UniDisplay, a research prototype that enables users to post text and images to a public display. We report on the design and development of the application and provide early insights from the deployment in a university setting.
@InProceedings{alt2014pdapps,
author = {Alt, Florian and Memarovic, Nemanja and Greis, Miriam and Henze, Niels},
title = {{UniDisplay - A Research Prototype to Investigate Expectations Towards Public Display Applications}},
booktitle = {{Proceedings of the 1st Workshop on Developing Applications for Pervasive Display Networks}},
year = {2014},
series = {PD-Apps '14},
publisher = {IEEE},
note = {alt2014pdapps},
abstract = {As public display networks become open, novel types of interaction applications emerge. In particular, we expect applications that support user-generated content to rapidly gain importance, since they provide a tangible benefit for the user in the form of digital bulletin boards, discussion platforms that foster public engagement, and applications that allow for self-expression. At the same time, such applications infer several challenges: first, they need to provide suitable means for the passerby to contribute content to the application; second, mechanisms need to be employed that provide sufficient control for the display owner with regard to content moderation; and third, the users’ expectations with regard to the posting procedure need to be well understood. In this paper we present UniDisplay, a research prototype that enables users to post text and images to a public display. We report on the design and development of the application and provide early insights from the deployment in a university setting.},
keywords = {UniDisplay, Public Displays},
location = {Budapest, Hungary},
numpages = {6},
owner = {florianalt},
timestamp = {2014.03.28},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2014pdapps.pdf},
}
F. Alt, S. Schneegass, J. Auda, R. Rzayev, and N. Broy, “Using Eye-tracking to Support Interaction with Layered 3D Interfaces on Stereoscopic Displays,” in Proceedings of the 19th International Conference on Intelligent User Interfaces, New York, NY, USA, 2014, pp. 267–272. doi:10.1145/2557500.2557518
[BibTeX] [Abstract] [Download PDF]
In this paper, we investigate the concept of gaze-based interaction with 3D user interfaces. We currently see stereo vision displays becoming ubiquitous, particularly as auto-stereoscopy enables the perception of 3D content without the use of glasses. As a result, application areas for 3D beyond entertainment in cinema or at home emerge, including work settings, mobile phones, public displays, and cars. At the same time, eye tracking is hitting the consumer market with low-cost devices. We envision eye trackers in the future to be integrated with consumer devices (laptops, mobile phones, displays), hence allowing the user’s gaze to be analyzed and used as input for interactive applications. A particular challenge when applying this concept to 3D displays is that current eye trackers provide the gaze point in 2D only (x and y coordinates). In this paper, we compare the performance of two methods that use the eye’s physiology for calculating the gaze point in 3D space, hence enabling gaze-based interaction with stereoscopic content. Furthermore, we provide a comparison of gaze interaction in 2D and 3D with regard to user experience and performance. Our results show that with current technology, eye tracking on stereoscopic displays is possible with similar performance as on standard 2D screens.
@InProceedings{alt2014iui,
author = {Alt, Florian and Schneegass, Stefan and Auda, Jonas and Rzayev, Rufat and Broy, Nora},
title = {Using Eye-tracking to Support Interaction with Layered 3D Interfaces on Stereoscopic Displays},
booktitle = {Proceedings of the 19th International Conference on Intelligent User Interfaces},
year = {2014},
series = {IUI '14},
pages = {267--272},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2014iui},
abstract = {In this paper, we investigate the concept of gaze-based interaction with 3D user interfaces. We currently see stereo vision displays becoming ubiquitous, particularly as auto-stereoscopy enables the perception of 3D content without the use of glasses. As a result, application areas for 3D beyond entertainment in cinema or at home emerge, including work settings, mobile phones, public displays, and cars. At the same time, eye tracking is hitting the consumer market with low-cost devices. We envision eye trackers in the future to be integrated with consumer devices (laptops, mobile phones, displays), hence allowing the user's gaze to be analyzed and used as input for interactive applications. A particular challenge when applying this concept to 3D displays is that current eye trackers provide the gaze point in 2D only (x and y coordinates). In this paper, we compare the performance of two methods that use the eye's physiology for calculating the gaze point in 3D space, hence enabling gaze-based interaction with stereoscopic content. Furthermore, we provide a comparison of gaze interaction in 2D and 3D with regard to user experience and performance. Our results show that with current technology, eye tracking on stereoscopic displays is possible with similar performance as on standard 2D screens.},
acmid = {2557518},
doi = {10.1145/2557500.2557518},
isbn = {978-1-4503-2184-6},
keywords = {3d, eye tracking, gaze interaction, stereoscopic displays},
location = {Haifa, Israel},
numpages = {6},
timestamp = {2014.02.24},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2014iui.pdf},
}
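The abstract mentions two methods that use the eye's physiology to recover a 3D gaze point but does not spell them out. Purely as an illustration of the general idea, the TypeScript sketch below lifts 2D gaze to 3D by casting a ray from each eye through its on-screen gaze point and taking the midpoint of the rays' closest approach. This ray-intersection approach and all names in it are assumptions for illustration, not necessarily the paper's method.

// Hypothetical sketch: binocular 3D gaze estimation by ray intersection.
// Not the paper's documented method; one common way to lift 2D gaze to 3D.

type Vec3 = { x: number; y: number; z: number };

const sub = (a: Vec3, b: Vec3): Vec3 => ({ x: a.x - b.x, y: a.y - b.y, z: a.z - b.z });
const add = (a: Vec3, b: Vec3): Vec3 => ({ x: a.x + b.x, y: a.y + b.y, z: a.z + b.z });
const mul = (a: Vec3, s: number): Vec3 => ({ x: a.x * s, y: a.y * s, z: a.z * s });
const dot = (a: Vec3, b: Vec3): number => a.x * b.x + a.y * b.y + a.z * b.z;

// Each eye defines a ray: origin = eye position, direction = towards its
// 2D gaze point on the screen plane (z = 0). The 3D gaze point is taken
// as the midpoint of the shortest segment connecting the two rays.
function gazePoint3D(leftEye: Vec3, leftGaze: Vec3, rightEye: Vec3, rightGaze: Vec3): Vec3 {
  const d1 = sub(leftGaze, leftEye);   // left ray direction
  const d2 = sub(rightGaze, rightEye); // right ray direction
  const r = sub(leftEye, rightEye);
  const a = dot(d1, d1), b = dot(d1, d2), c = dot(d2, d2);
  const e = dot(d1, r), f = dot(d2, r);
  const denom = a * c - b * b;         // ~0 if the rays are (near) parallel
  const t1 = (b * f - c * e) / denom;  // parameter along left ray
  const t2 = (a * f - b * e) / denom;  // parameter along right ray
  const p1 = add(leftEye, mul(d1, t1));
  const p2 = add(rightEye, mul(d2, t2));
  return mul(add(p1, p2), 0.5);        // midpoint of closest approach
}

In practice the two rays rarely intersect exactly, which is why the midpoint of the shortest connecting segment is used; near-parallel rays (denom close to zero) would need special handling.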
N. Broy, S. Höckh, A. Frederiksen, M. Gilowski, J. Eichhorn, F. Naser, H. Jung, J. Niemann, M. Schell, A. Schmid, and F. Alt, “Exploring Design Parameters for a 3D Head-Up Display,” in Proceedings of the International Symposium on Pervasive Displays, New York, NY, USA, 2014, pp. 38:38–38:43. doi:10.1145/2611009.2611011
[BibTeX] [Abstract] [Download PDF]
Today, head-up displays (HUDs) are commonly used in cars to show basic driving information in the visual field of the viewer. This allows information to be perceived in a quick and easy to understand manner. With advances in technology, HUDs will allow richer information to be conveyed to the driver by exploiting the third dimension. We envision a stereoscopic HUD for displaying content in 3D space. This requires an understanding of how parallaxes impact the user’s performance and comfort, which is the focus of this work. In two user studies, involving 49 participants, we (a) gather insights into how projection distances and stereoscopic visualizations influence the comfort zone and (b) the depth judgment of the user. The results show that with larger projection distances both the comfort zone and the minimum comfortable viewing distance increase. Higher distances between the viewer and a real world object to be judged decrease the judgment accuracy.
@InProceedings{broy2014perdis,
author = {Broy, Nora and H\"{o}ckh, Simone and Frederiksen, Annette and Gilowski, Michael and Eichhorn, Julian and Naser, Felix and Jung, Horst and Niemann, Julia and Schell, Martin and Schmid, Albrecht and Alt, Florian},
title = {Exploring Design Parameters for a 3D Head-Up Display},
booktitle = {Proceedings of The International Symposium on Pervasive Displays},
year = {2014},
series = {PerDis '14},
pages = {38:38--38:43},
address = {New York, NY, USA},
publisher = {ACM},
note = {broy2014perdis},
abstract = {Today, head-up displays (HUDs) are commonly used in cars to show basic driving information in the visual field of the viewer. This allows information to be perceived in a quick and easy to understand manner. With advances in technology, HUDs will allow richer information to be conveyed to the driver by exploiting the third dimension. We envision a stereoscopic HUD for displaying content in 3D space. This requires an understanding of how parallaxes impact the user's performance and comfort, which is the focus of this work. In two user studies, involving 49 participants, we (a) gather insights into how projection distances and stereoscopic visualizations influence the comfort zone and (b) the depth judgment of the user. The results show that with larger projection distances both the comfort zone and the minimum comfortable viewing distance increase. Higher distances between the viewer and a real world object to be judged decrease the judgment accuracy.},
acmid = {2611011},
articleno = {38},
doi = {10.1145/2611009.2611011},
isbn = {978-1-4503-2952-1},
keywords = {3D Displays, Automotive UIs, Head-Up Displays, Human Factors},
location = {Copenhagen, Denmark},
numpages = {6},
timestamp = {2014.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2014perdis.pdf},
}
N. Broy, S. Schneegass, F. Alt, and A. Schmidt, “FrameBox and MirrorBox: Tools and Guidelines to Support Designers in Prototyping Interfaces for 3D Displays,” in Proceedings of the 32nd Annual ACM Conference on Human Factors in Computing Systems, New York, NY, USA, 2014, pp. 2037–2046. doi:10.1145/2556288.2557183
[BibTeX] [Abstract] [Download PDF]
In this paper, we identify design guidelines for stereoscopic 3D (S3D) user interfaces (UIs) and present the MirrorBox and the FrameBox, two UI prototyping tools for S3D displays. As auto-stereoscopy becomes available for the mass market we believe the design of S3D UIs for devices, for example, mobile phones, public displays, or car dashboards, will rapidly gain importance. A benefit of such UIs is that they can group and structure information in a way that makes them easily perceivable for the user. For example, important information can be shown in front of less important information. This paper identifies core requirements for designing S3D UIs and derives concrete guidelines. The requirements also serve as a basis for two depth layout tools we built with the aim to overcome limitations of traditional prototyping when sketching S3D UIs. We evaluated the tools with usability experts and compared them to traditional paper prototyping.
@InProceedings{broy2014chi,
author = {Broy, Nora and Schneegass, Stefan and Alt, Florian and Schmidt, Albrecht},
title = {FrameBox and MirrorBox: Tools and Guidelines to Support Designers in Prototyping Interfaces for 3D Displays},
booktitle = {Proceedings of the 32nd Annual ACM Conference on Human Factors in Computing Systems},
year = {2014},
series = {CHI '14},
pages = {2037--2046},
address = {New York, NY, USA},
publisher = {ACM},
note = {broy2014chi},
abstract = {In this paper, we identify design guidelines for stereoscopic 3D (S3D) user interfaces (UIs) and present the MirrorBox and the FrameBox, two UI prototyping tools for S3D displays. As auto-stereoscopy becomes available for the mass market we believe the design of S3D UIs for devices, for example, mobile phones, public displays, or car dashboards, will rapidly gain importance. A benefit of such UIs is that they can group and structure information in a way that makes them easily perceivable for the user. For example, important information can be shown in front of less important information. This paper identifies core requirements for designing S3D UIs and derives concrete guidelines. The requirements also serve as a basis for two depth layout tools we built with the aim to overcome limitations of traditional prototyping when sketching S3D UIs. We evaluated the tools with usability experts and compared them to traditional paper prototyping.},
acmid = {2557183},
doi = {10.1145/2556288.2557183},
isbn = {978-1-4503-2473-1},
keywords = {prototyping, stereoscopic 3d, user interfaces},
location = {Toronto, Ontario, Canada},
numpages = {10},
timestamp = {2014.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2014chi.pdf},
}
N. Broy, B. J. Zierer, S. Schneegass, and F. Alt, “Exploring Virtual Depth for Automotive Instrument Cluster Concepts,” in Proceedings of the Extended Abstracts of the 32nd Annual ACM Conference on Human Factors in Computing Systems, New York, NY, USA, 2014, pp. 1783–1788. doi:10.1145/2559206.2581362
[BibTeX] [Abstract] [Download PDF]
This paper compares the user experience of three novel concept designs for 3D-based car dashboards. Our work is motivated by the fact that analogue dashboards are currently being replaced by their digital counterparts. At the same time, auto-stereoscopic displays enter the market, allowing the quality of novel dashboards to be increased, both with regard to the perceived quality and in supporting the driving task. Since no guidelines or principles exist for the design of digital 3D dashboards, we take an initial step in designing and evaluating such interfaces. In a study with 12 participants we were able to show that stereoscopic 3D increases the perceived quality of the display while motion parallax leads to a rather disturbing experience.
@InProceedings{broy2014chiea,
author = {Broy, Nora and Zierer, Benedikt J. and Schneegass, Stefan and Alt, Florian},
title = {Exploring Virtual Depth for Automotive Instrument Cluster Concepts},
booktitle = {Proceedings of the Extended Abstracts of the 32nd Annual ACM Conference on Human Factors in Computing Systems},
year = {2014},
series = {CHI EA '14},
pages = {1783--1788},
address = {New York, NY, USA},
publisher = {ACM},
note = {broy2014chiea},
abstract = {This paper compares the user experience of three novel concept designs for 3D-based car dashboards. Our work is motivated by the fact that analogue dashboards are currently being replaced by their digital counterparts. At the same time, auto-stereoscopic displays enter the market, allowing the quality of novel dashboards to be increased, both with regard to the perceived quality and in supporting the driving task. Since no guidelines or principles exist for the design of digital 3D dashboards, we take an initial step in designing and evaluating such interfaces. In a study with 12 participants we were able to show that stereoscopic 3D increases the perceived quality of the display while motion parallax leads to a rather disturbing experience.},
acmid = {2581362},
doi = {10.1145/2559206.2581362},
isbn = {978-1-4503-2474-8},
keywords = {automotive user interfaces, motion parallax, stereoscopic 3d, user experience},
location = {Toronto, Ontario, Canada},
numpages = {6},
timestamp = {2014.04.27},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2014chiea.pdf},
}
N. Davies, S. Clinch, and F. Alt, Pervasive Displays – Understanding the Future of Digital Signage, Morgan and Claypool Publishers, 2014.
[BibTeX] [Abstract] [Download PDF]
Fueled by falling display hardware costs and rising demand, digital signage and pervasive displays are becoming ever more ubiquitous. Such systems have traditionally been used for advertising and information dissemination with digital signage commonplace in shopping malls, airports and public spaces. While advertising and broadcasting announcements remain important applications, developments in sensing and interaction technologies are enabling entirely new classes of display applications that tailor content to the situation and audience of the display. As a result, signage systems are beginning to transition from simple broadcast systems to rich platforms for communication and interaction. In this lecture we provide an introduction to this emerging field for researchers and practitioners interested in creating state-of-the-art pervasive display systems. We begin by describing the history of pervasive display research, providing illustrations of key systems, from pioneering work on supporting collaboration to contemporary systems designed for personalized information delivery. We then consider what the near-future might hold for display networks, describing a series of compelling applications that are being postulated for future display networks. Creating such systems raises a wide range of challenges and requires designers to make a series of important tradeoffs. We dedicate four chapters to key aspects of pervasive display design: audience engagement, display interaction, system software and system evaluation. These chapters provide an overview of current thinking in each area. Finally, we present a series of case studies of display systems and our concluding remarks.
@Book{davies2014synthesis,
title = {{Pervasive Displays - Understanding the Future of Digital Signage}},
publisher = {Morgan and Claypool Publishers},
year = {2014},
author = {Nigel Davies AND Sarah Clinch AND Florian Alt},
series = {Synthesis Lectures},
note = {davies2014synthesis},
abstract = {Fueled by falling display hardware costs and rising demand, digital signage and pervasive displays are becoming ever more ubiquitous. Such systems have traditionally been used for advertising and information dissemination with digital signage commonplace in shopping malls, airports and public spaces. While advertising and broadcasting announcements remain important applications, developments in sensing and interaction technologies are enabling entirely new classes of display applications that tailor content to the situation and audience of the display. As a result, signage systems are beginning to transition from simple broadcast systems to rich platforms for communication and interaction. In this lecture we provide an introduction to this emerging field for researchers and practitioners interested in creating state-of-the-art pervasive display systems. We begin by describing the history of pervasive display research, providing illustrations of key systems, from pioneering work on supporting collaboration to contemporary systems designed for personalized information delivery. We then consider what the near-future might hold for display networks, describing a series of compelling applications that are being postulated for future display networks. Creating such systems raises a wide range of challenges and requires designers to make a series of important tradeoffs. We dedicate four chapters to key aspects of pervasive display design: audience engagement, display interaction, system software and system evaluation. These chapters provide an overview of current thinking in each area. Finally, we present a series of case studies of display systems and our concluding remarks.},
booktitle = {Pervasive Displays - Understanding the Future of Digital Signage},
timestamp = {2014.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/davies2014synthesis.pdf},
}
M. Greis, F. Alt, N. Henze, and N. Memarovic, “I Can Wait a Minute: Uncovering the Optimal Delay Time for Pre-moderated User-generated Content on Public Displays,” in Proceedings of the SIGCHI Conference on Human Factors in Computing Systems, New York, NY, USA, 2014, pp. 1435–1438. doi:10.1145/2556288.2557186
[BibTeX] [Abstract] [Download PDF]
Public displays have advanced from isolated and non interactive “ad” displays which show images and videos to displays that are networked, interactive, and open to a wide variety of content and applications. Prior work has shown large potential of user-generated content on public displays. However, one of the problems with user-generated content on public displays is moderation as content may be explicit or troublesome for a particular location. In this work we explore the expectations of users with regard to content moderation on public displays. An online survey revealed that people not only think that display content should be moderated but also that a delay of up to 10 minutes is acceptable if display content is moderated. In a subsequent in the wild deployment we compared different moderation delays. We found that a moderation delay significantly decreases the number of user-generated posts while at the same time there is no significant effect on users’ decision to repeatedly post on the display.
@InProceedings{greis2014chi,
author = {Greis, Miriam and Alt, Florian and Henze, Niels and Memarovic, Nemanja},
title = {I Can Wait a Minute: Uncovering the Optimal Delay Time for Pre-moderated User-generated Content on Public Displays},
booktitle = {Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
year = {2014},
series = {CHI '14},
pages = {1435--1438},
address = {New York, NY, USA},
publisher = {ACM},
note = {greis2014chi},
abstract = {Public displays have advanced from isolated and non interactive "ad" displays which show images and videos to displays that are networked, interactive, and open to a wide variety of content and applications. Prior work has shown large potential of user-generated content on public displays. However, one of the problems with user-generated content on public displays is moderation as content may be explicit or troublesome for a particular location. In this work we explore the expectations of users with regard to content moderation on public displays. An online survey revealed that people not only think that display content should be moderated but also that a delay of up to 10 minutes is acceptable if display content is moderated. In a subsequent in the wild deployment we compared different moderation delays. We found that a moderation delay significantly decreases the number of user-generated posts while at the same time there is no significant effect on users' decision to repeatedly post on the display.},
acmid = {2557186},
doi = {10.1145/2556288.2557186},
isbn = {978-1-4503-2473-1},
keywords = {content moderation, public displays, twitter},
location = {Toronto, Ontario, Canada},
numpages = {4},
timestamp = {2014.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/greis2014chi.pdf},
}
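For readers who want the pre-moderation mechanism made concrete: a moderated display could hold each submission in a queue until a moderator decides, bounded by the acceptable delay reported above. The sketch below is one possible reading; the queue, field names, and the drop-at-deadline policy are invented for illustration, and only the 10-minute figure comes from the abstract.

// Hypothetical sketch of pre-moderation with a bounded delay. Only the
// 10-minute bound is taken from the survey result; everything else is assumed.

const MAX_DELAY_MS = 10 * 60 * 1000; // survey: up to 10 minutes is acceptable

interface Post { id: number; text: string; submittedAt: number; }

const pending: Post[] = []; // awaiting a moderator decision
const live: Post[] = [];    // shown on the display

function submit(post: Post): void { pending.push(post); }

// Called from a moderator UI; approved posts go live immediately.
function moderate(id: number, approved: boolean): void {
  const i = pending.findIndex((p) => p.id === id);
  if (i === -1) return;
  const [post] = pending.splice(i, 1);
  if (approved) live.push(post);
}

// Posts not moderated within the acceptable delay are dropped -- one possible
// policy; the paper does not prescribe what happens at the deadline.
setInterval(() => {
  const now = Date.now();
  for (let i = pending.length - 1; i >= 0; i--) {
    if (now - pending[i].submittedAt > MAX_DELAY_MS) pending.splice(i, 1);
  }
}, 30_000);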
J. R. Häkkilä, M. Posti, S. Schneegass, F. Alt, K. Gultekin, and A. Schmidt, “Let Me Catch This!: Experiencing Interactive 3D Cinema Through Collecting Content with a Mobile Phone,” in Proceedings of the 32nd Annual ACM Conference on Human Factors in Computing Systems, New York, NY, USA, 2014, pp. 1011–1020. doi:10.1145/2556288.2557187
[BibTeX] [Abstract] [Download PDF]
The entertainment industry is going through a transformation, and technology development is affecting how we can enjoy and interact with the entertainment media content in new ways. In our work, we explore how to enable interaction with content in the context of 3D cinemas. This allows viewers to use their mobile phone to retrieve, for example, information on the artist of the soundtrack currently playing or a discount coupon on the watch the main actor is wearing. We are particularly interested in the user experience of the interactive 3D cinema concept, and how different interactive elements and interaction techniques are perceived. We report on the development of a prototype application utilizing smart phones and on an evaluation in a cinema context with 20 participants. Results emphasize that designing for interactive cinema experiences should drive for holistic and positive user experiences. Interactive content should be tied together with the actual video content, but integrated into contexts where it does not conflict with the immersive experience with the movie.
@InProceedings{hakkila2014chi,
author = {H\"{a}kkil\"{a}, Jonna R. and Posti, Maaret and Schneegass, Stefan and Alt, Florian and Gultekin, Kunter and Schmidt, Albrecht},
title = {Let Me Catch This!: Experiencing Interactive 3D Cinema Through Collecting Content with a Mobile Phone},
booktitle = {Proceedings of the 32nd Annual ACM Conference on Human Factors in Computing Systems},
year = {2014},
series = {CHI '14},
pages = {1011--1020},
address = {New York, NY, USA},
publisher = {ACM},
note = {hakkila2014chi},
abstract = {The entertainment industry is going through a transformation, and technology development is affecting how we can enjoy and interact with the entertainment media content in new ways. In our work, we explore how to enable interaction with content in the context of 3D cinemas. This allows viewers to use their mobile phone to retrieve, for example, information on the artist of the soundtrack currently playing or a discount coupon on the watch the main actor is wearing. We are particularly interested in the user experience of the interactive 3D cinema concept, and how different interactive elements and interaction techniques are perceived. We report on the development of a prototype application utilizing smart phones and on an evaluation in a cinema context with 20 participants. Results emphasize that designing for interactive cinema experiences should drive for holistic and positive user experiences. Interactive content should be tied together with the actual video content, but integrated into contexts where it does not conflict with the immersive experience with the movie.},
acmid = {2557187},
doi = {10.1145/2556288.2557187},
isbn = {978-1-4503-2473-1},
keywords = {3d, interactive cinema, mobile phone interaction, user experience, user studies},
location = {Toronto, Ontario, Canada},
numpages = {10},
timestamp = {2014.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/hakkila2014chi.pdf},
}
M. Pfeiffer, S. Schneegass, F. Alt, and M. Rohs, “Let Me Grab This: A Comparison of EMS and Vibration for Haptic Feedback in Free-hand Interaction,” in Proceedings of the 5th Augmented Human International Conference, New York, NY, USA, 2014, pp. 48:1–48:8. doi:10.1145/2582051.2582099
[BibTeX] [Abstract] [Download PDF]
Free-hand interaction with large displays is getting more common, for example in public settings and exertion games. Adding haptic feedback offers the potential for more realistic and immersive experiences. While vibrotactile feedback is well known, electrical muscle stimulation (EMS) has not yet been explored in free-hand interaction with large displays. EMS offers a wide range of different strengths and qualities of haptic feedback. In this paper we first systematically investigate the design space for haptic feedback. Second, we experimentally explore differences between strengths of EMS and vibrotactile feedback. Third, based on the results, we evaluate EMS and vibrotactile feedback with regard to different virtual objects (soft, hard) and interaction with different gestures (touch, grasp, punch) in front of a large display. The results provide a basis for the design of haptic feedback that is appropriate for the given type of interaction and the material.
@InProceedings{pfeiffer2014ah,
author = {Pfeiffer, Max and Schneegass, Stefan and Alt, Florian and Rohs, Michael},
title = {Let Me Grab This: A Comparison of EMS and Vibration for Haptic Feedback in Free-hand Interaction},
booktitle = {Proceedings of the 5th Augmented Human International Conference},
year = {2014},
series = {AH '14},
pages = {48:1--48:8},
address = {New York, NY, USA},
publisher = {ACM},
note = {pfeiffer2014ah},
abstract = {Free-hand interaction with large displays is getting more common, for example in public settings and exertion games. Adding haptic feedback offers the potential for more realistic and immersive experiences. While vibrotactile feedback is well known, electrical muscle stimulation (EMS) has not yet been explored in free-hand interaction with large displays. EMS offers a wide range of different strengths and qualities of haptic feedback. In this paper we first systematically investigate the design space for haptic feedback. Second, we experimentally explore differences between strengths of EMS and vibrotactile feedback. Third, based on the results, we evaluate EMS and vibrotactile feedback with regard to different virtual objects (soft, hard) and interaction with different gestures (touch, grasp, punch) in front of a large display. The results provide a basis for the design of haptic feedback that is appropriate for the given type of interaction and the material.},
acmid = {2582099},
articleno = {48},
doi = {10.1145/2582051.2582099},
isbn = {978-1-4503-2761-9},
keywords = {electrical muscle stimulation, free-hand interaction, haptic feedback, large displays, tactile feedback},
location = {Kobe, Japan},
numpages = {8},
timestamp = {2014.03.07},
url = {http://www.florian-alt.org/unibw/wp-content/publications/pfeiffer2014ah.pdf},
}
M. Pfeiffer, S. Schneegass, F. Alt, and M. Rohs, “A Design Space for Electrical Muscle Stimulation Feedback for Freehand Interaction,” in Proceedings of the First CHI Workshop on Assistive Augmentation, 2014.
[BibTeX] [Abstract] [Download PDF]
Free-hand interaction becomes a common technique for interacting with large displays. At the same time, providing haptic feedback for free-hand interaction is still a challenge, particularly feedback with different characteristics (i.e., strengths, patterns) to convey particular information. We see electrical muscle stimulation (EMS) as a well-suited technology for providing haptic feedback in this domain. The characteristics of EMS can be used to assist users in learning, manipulating, and perceiving virtual objects. One of the core challenges is to understand these characteristics and how they can be applied. As a step in this direction, this paper presents a design space that identifies different aspects of using EMS for haptic feedback. The design space is meant as a basis for future research investigating how particular characteristics can be exploited to provide specific haptic feedback.
@InProceedings{pfeiffer2014asstech,
author = {Pfeiffer, Max AND Schneegass, Stefan AND Alt, Florian and Rohs, Michael},
title = {{A Design Space for Electrical Muscle Stimulation Feedback for Freehand Interaction}},
booktitle = {{Proceedings of the First CHI Workshop on Assistive Augmentation}},
year = {2014},
series = {Assistive Augmentation '14},
note = {pfeiffer2014asstech},
abstract = {Free-hand interaction becomes a common technique for interacting with large displays. At the same time, providing haptic feedback for free-hand interaction is still a challenge, particularly feedback with different characteristics (i.e., strengths, patterns) to convey particular information. We see electrical muscle stimulation (EMS) as a well-suited technology for providing haptic feedback in this domain. The characteristics of EMS can be used to assist users in learning, manipulating, and perceiving virtual objects. One of the core challenges is to understand these characteristics and how they can be applied. As a step in this direction, this paper presents a design space that identifies different aspects of using EMS for haptic feedback. The design space is meant as a basis for future research investigating how particular characteristics can be exploited to provide specific haptic feedback.},
location = {Toronto, Canada},
numpages = {6},
timestamp = {2014.04.26},
url = {http://www.florian-alt.org/unibw/wp-content/publications/pfeiffer2014asstech.pdf},
}
S. Schneegass and F. Alt, “SenScreen: A Toolkit for Supporting Sensor-enabled Multi-Display Networks,” in Proceedings of the International Symposium on Pervasive Displays, New York, NY, USA, 2014, pp. 92:92–92:97. doi:10.1145/2611009.2611017
[BibTeX] [Abstract] [Download PDF]
Over the past years, a number of sensors have emerged, that enable gesture-based interaction with public display applications, including Microsoft Kinect, Asus Xtion, and Leap Motion. In this way, interaction with displays can be made more attractive, particularly if deployed across displays hence involving many users. However, interactive applications are still scarce, which can be attributed to the fact that developers usually need to implement a low-level connection to the sensor. In this work, we tackle this issue by presenting a toolkit, called SenScreen, consisting of (a) easy-to-install adapters that handle the low-level connection to sensors and provides the data via (b) an API that allows developers to write their applications in JavaScript. We evaluate our approach by letting two groups of developers create an interactive game each using our toolkit. Observation, interviews, and questionnaire indicate that our toolkit simplifies the implementation of interactive applications and may, hence, serve as a first step towards a more widespread use of interactive public displays.
@InProceedings{schneegass2014perdis2,
author = {Schneegass, Stefan and Alt, Florian},
title = {SenScreen: A Toolkit for Supporting Sensor-enabled Multi-Display Networks},
booktitle = {Proceedings of The International Symposium on Pervasive Displays},
year = {2014},
series = {PerDis '14},
pages = {92:92--92:97},
address = {New York, NY, USA},
publisher = {ACM},
note = {schneegass2014perdis2},
abstract = {Over the past years, a number of sensors have emerged, that enable gesture-based interaction with public display applications, including Microsoft Kinect, Asus Xtion, and Leap Motion. In this way, interaction with displays can be made more attractive, particularly if deployed across displays hence involving many users. However, interactive applications are still scarce, which can be attributed to the fact that developers usually need to implement a low-level connection to the sensor. In this work, we tackle this issue by presenting a toolkit, called SenScreen, consisting of (a) easy-to-install adapters that handle the low-level connection to sensors and provides the data via (b) an API that allows developers to write their applications in JavaScript. We evaluate our approach by letting two groups of developers create an interactive game each using our toolkit. Observation, interviews, and questionnaire indicate that our toolkit simplifies the implementation of interactive applications and may, hence, serve as a first step towards a more widespread use of interactive public displays.},
acmid = {2611017},
articleno = {92},
doi = {10.1145/2611009.2611017},
isbn = {978-1-4503-2952-1},
keywords = {Interactive Applications, Public Display Architecture, Toolkits},
location = {Copenhagen, Denmark},
numpages = {6},
timestamp = {2014.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schneegass2014perdis2.pdf},
}
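The abstract names the toolkit's building blocks (sensor adapters plus a JavaScript API) without giving the interface itself. Purely as an illustration, a client of such a toolkit might look like the TypeScript sketch below; the endpoint, event names, and payload fields are invented and are not SenScreen's actual API.

// Hypothetical sketch of a sensor-toolkit client. The address, event names,
// and payload shape are invented, not SenScreen's documented interface.

interface GestureEvent {
  sensor: string;                                  // e.g. "kinect-entrance"
  gesture: "swipe-left" | "swipe-right" | "wave";  // high-level gesture label
  userId: number;                                  // tracking id from the adapter
}

// The adapter handles the low-level sensor connection; the application
// only subscribes to high-level events, here over a WebSocket.
const socket = new WebSocket("ws://localhost:9000/events"); // assumed address

socket.onmessage = (msg: MessageEvent) => {
  const event = JSON.parse(msg.data) as GestureEvent;
  if (event.gesture === "wave") {
    console.log(`User ${event.userId} waved at ${event.sensor}`);
  }
};

The point of such a design is that display applications stay plain web apps: they never touch Kinect, Xtion, or Leap Motion drivers directly.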
S. Schneegass, F. Alt, J. Scheible, and A. Schmidt, “Midair Displays: Concept and First Experiences with Free-Floating Pervasive Displays,” in Proceedings of the International Symposium on Pervasive Displays, New York, NY, USA, 2014, pp. 27:27–27:31. doi:10.1145/2611009.2611013
[BibTeX] [Abstract] [Download PDF]
Due to advances in technology, displays could replace literally any surface in the future, including walls, windows, and ceilings. At the same time, midair remains a relatively unexplored domain for the use of displays as of today, particularly in public space. Nevertheless, we see large potential in the ability to make displays appear at any possible point in space, both indoors and outdoors. Such displays, that we call midair displays, could control large crowds in emergency situations, they could be used during sports for navigation and feedback on performance, or as group displays. We see midair displays as a complementary technology to wearable displays. In contrast to statically deployed displays they allow information to be brought to the user anytime and anywhere. We explore the concept of midair displays and show that with current technology, e.g., copter drones, such displays can be easily built. A study on the readability of such displays showcases the potential and feasibility of the concept and provides early insights.
@InProceedings{schneegass2014perdis1,
author = {Schneegass, Stefan and Alt, Florian and Scheible, J\"{u}rgen and Schmidt, Albrecht},
title = {Midair Displays: Concept and First Experiences with Free-Floating Pervasive Displays},
booktitle = {Proceedings of The International Symposium on Pervasive Displays},
year = {2014},
series = {PerDis '14},
pages = {27:27--27:31},
address = {New York, NY, USA},
publisher = {ACM},
note = {schneegass2014perdis1},
abstract = {Due to advances in technology, displays could replace literally any surface in the future, including walls, windows, and ceilings. At the same time, midair remains a relatively unexplored domain for the use of displays as of today, particularly in public space. Nevertheless, we see large potential in the ability to make displays appear at any possible point in space, both indoors and outdoors. Such displays, that we call midair displays, could control large crowds in emergency situations, they could be used during sports for navigation and feedback on performance, or as group displays. We see midair displays as a complementary technology to wearable displays. In contrast to statically deployed displays they allow information to be brought to the user anytime and anywhere. We explore the concept of midair displays and show that with current technology, e.g., copter drones, such displays can be easily built. A study on the readability of such displays showcases the potential and feasibility of the concept and provides early insights.},
acmid = {2611013},
articleno = {27},
doi = {10.1145/2611009.2611013},
isbn = {978-1-4503-2952-1},
keywords = {Drones, Free-Floating Displays, Interaction Techniques, Midair Displays, Pervasive Display},
location = {Copenhagen, Denmark},
numpages = {5},
timestamp = {2014.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schneegass2014perdis1.pdf},
}
S. Schneegass, F. Alt, J. Scheible, A. Schmidt, and H. Su, “Midair Displays: Exploring the Concept of Free-floating Public Displays,” in CHI ’14 Extended Abstracts on Human Factors in Computing Systems, New York, NY, USA, 2014, pp. 2035–2040. doi:10.1145/2559206.2581190
[BibTeX] [Abstract] [Download PDF]
Due to advances in technology, displays could replace literally any surface in the future, including walls, windows, and ceilings. At the same time, midair remains a relatively unexplored domain for the use of displays as of today, particularly in public spaces. Nevertheless, we see large potential in the ability to make displays appear at any possible point in space, both indoors and outdoors. Such displays, that we call midair displays, could control large crowds in emergency situations, they could be used during sports for navigation and feedback on performance, or they could be used as group displays which enable information to be brought to the user anytime and anywhere. We explore the concept of midair displays and show that with current technology, for example copter drones, such displays can be easily built.
@InProceedings{schneegass2014chiea,
author = {Schneegass, Stefan and Alt, Florian and Scheible, J\"{u}rgen and Schmidt, Albrecht and Su, Haifeng},
title = {Midair Displays: Exploring the Concept of Free-floating Public Displays},
booktitle = {CHI '14 Extended Abstracts on Human Factors in Computing Systems},
year = {2014},
series = {CHI EA '14},
pages = {2035--2040},
address = {New York, NY, USA},
publisher = {ACM},
note = {schneegass2014chiea},
abstract = {Due to advances in technology, displays could replace literally any surface in the future, including walls, windows, and ceilings. At the same time, midair remains a relatively unexplored domain for the use of displays as of today, particularly in public spaces. Nevertheless, we see large potential in the ability to make displays appear at any possible point in space, both indoors and outdoors. Such displays, that we call midair displays, could control large crowds in emergency situations, they could be used during sports for navigation and feedback on performance, or they could be used as group displays which enable information to be brought to the user anytime and anywhere. We explore the concept of midair displays and show that with current technology, for example copter drones, such displays can be easily built.},
acmid = {2581190},
doi = {10.1145/2559206.2581190},
isbn = {978-1-4503-2474-8},
keywords = {drones, midair displays, public displays},
location = {Toronto, Ontario, Canada},
numpages = {6},
timestamp = {2014.04.27},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schneegass2014chiea.pdf},
}
S. Schneegass, F. Steimle, A. Bulling, F. Alt, and A. Schmidt, “SmudgeSafe: Geometric Image Transformations for Smudge-resistant User Authentication,” in Proceedings of the 2014 ACM International Joint Conference on Pervasive and Ubiquitous Computing, New York, NY, USA, 2014, pp. 775–786. doi:10.1145/2632048.2636090
[BibTeX] [Abstract] [Download PDF]
Touch-enabled user interfaces have become ubiquitous, such as on ATMs or portable devices. At the same time, authentication using touch input is problematic, since finger smudge traces may allow attackers to reconstruct passwords. We present SmudgeSafe, an authentication system that uses random geometric image transformations, such as translation, rotation, scaling, shearing, and flipping, to increase the security of cued-recall graphical passwords. We describe the design space of these transformations and report on two user studies: A lab-based security study involving 20 participants in attacking user-defined passwords, using high quality pictures of real smudge traces captured on a mobile phone display; and an in-the-field usability study with 374 participants who generated more than 130,000 logins on a mobile phone implementation of SmudgeSafe. Results show that SmudgeSafe significantly increases security compared to authentication schemes based on PINs and lock patterns, and exhibits very high learnability, efficiency, and memorability.
@InProceedings{schneegass2014ubicomp,
author = {Schneegass, Stefan and Steimle, Frank and Bulling, Andreas and Alt, Florian and Schmidt, Albrecht},
title = {SmudgeSafe: Geometric Image Transformations for Smudge-resistant User Authentication},
booktitle = {Proceedings of the 2014 ACM International Joint Conference on Pervasive and Ubiquitous Computing},
year = {2014},
series = {UbiComp '14},
pages = {775--786},
address = {New York, NY, USA},
publisher = {ACM},
note = {schneegass2014ubicomp},
abstract = {Touch-enabled user interfaces have become ubiquitous, such as on ATMs or portable devices. At the same time, authentication using touch input is problematic, since finger smudge traces may allow attackers to reconstruct passwords. We present SmudgeSafe, an authentication system that uses random geometric image transformations, such as translation, rotation, scaling, shearing, and flipping, to increase the security of cued-recall graphical passwords. We describe the design space of these transformations and report on two user studies: A lab-based security study involving 20 participants in attacking user-defined passwords, using high quality pictures of real smudge traces captured on a mobile phone display; and an in-the-field usability study with 374 participants who generated more than 130,000 logins on a mobile phone implementation of SmudgeSafe. Results show that SmudgeSafe significantly increases security compared to authentication schemes based on PINs and lock patterns, and exhibits very high learnability, efficiency, and memorability.},
acmid = {2636090},
doi = {10.1145/2632048.2636090},
isbn = {978-1-4503-2968-2},
keywords = {finger smudge traces, graphical passwords, touch input},
location = {Seattle, Washington},
numpages = {12},
timestamp = {2014.09.15},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schneegass2014ubicomp.pdf},
}
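The abstract lists the transformation types (translation, rotation, scaling, shearing, flipping). As a concrete and purely illustrative reading of that idea, the sketch below composes a random instance of each into one affine transform that could be applied to the password image at every login, so smudge traces no longer line up with the previous layout. The parameter ranges are invented, not taken from the paper.

// Hypothetical sketch: compose a random affine transform per login so that
// smudge traces from the previous session no longer align with the image.
// Parameter ranges are invented for illustration.

interface Affine { a: number; b: number; c: number; d: number; e: number; f: number; }

const rand = (min: number, max: number): number => min + Math.random() * (max - min);

// Composes scale * flip * rotation * shear into one canvas-style matrix.
function randomTransform(): Affine {
  const angle = rand(-Math.PI / 8, Math.PI / 8);  // rotation
  const scale = rand(0.85, 1.15);                 // uniform scaling
  const shear = rand(-0.2, 0.2);                  // horizontal shear
  const flip = Math.random() < 0.5 ? -1 : 1;      // horizontal flip
  const cos = Math.cos(angle), sin = Math.sin(angle);
  return {
    a: flip * scale * cos,
    b: scale * sin,
    c: flip * scale * (shear * cos - sin),
    d: scale * (shear * sin + cos),
    e: rand(-40, 40),                             // x translation (px)
    f: rand(-40, 40),                             // y translation (px)
  };
}

// Apply the same transform when drawing the image; the user's touch points
// would be mapped through its inverse back into image coordinates to verify.
function drawPasswordImage(ctx: CanvasRenderingContext2D, img: CanvasImageSource, t: Affine): void {
  ctx.setTransform(t.a, t.b, t.c, t.d, t.e, t.f);
  ctx.drawImage(img, 0, 0);
  ctx.resetTransform();
}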
F. Steinberger, M. Foth, and F. Alt, “Vote With Your Feet: Local Community Polling on Urban Screens,” in Proceedings of the International Symposium on Pervasive Displays, New York, NY, USA, 2014, pp. 44:44–44:49. doi:10.1145/2611009.2611015
[BibTeX] [Abstract] [Download PDF]
Falling prices have led to an ongoing spread of public displays in urban areas. Still, they mostly show passive content such as commercials and digital signage. At the same time, technological advances have enabled the creation of interactive displays potentially increasing their attractiveness for the audience, e.g. through providing a platform for civic discourse. This poses considerable challenges, since displays need to communicate the opportunity to engage, motivate the audience to do so, and be easy to use. In this paper we present Vote With Your Feet, a hyperlocal public polling tool for urban screens allowing users to express their opinions. Similar to vox populi interviews on TV or polls on news websites, the tool is meant to reflect the mindset of the community on topics such as current affairs, cultural identity and local matters. It is novel in that it focuses on a situated civic discourse and provides a tangible user interface, tackling the mentioned challenges. It shows one Yes/No question at a time and enables users to vote by stepping on one of two tangible buttons on the ground. This user interface was introduced to attract people’s attention and to lower participation barriers. Our field study showed that Vote With Your Feet is perceived as inviting and that it can spark discussions among co-located people.
@InProceedings{steinberger2014perdis,
author = {Steinberger, Fabius and Foth, Marcus and Alt, Florian},
title = {Vote With Your Feet: Local Community Polling on Urban Screens},
booktitle = {Proceedings of The International Symposium on Pervasive Displays},
year = {2014},
series = {PerDis '14},
pages = {44:44--44:49},
address = {New York, NY, USA},
publisher = {ACM},
note = {steinberger2014perdis},
abstract = {Falling prices have led to an ongoing spread of public displays in urban areas. Still, they mostly show passive content such as commercials and digital signage. At the same time, technological advances have enabled the creation of interactive displays potentially increasing their attractiveness for the audience, e.g. through providing a platform for civic discourse. This poses considerable challenges, since displays need to communicate the opportunity to engage, motivate the audience to do so, and be easy to use. In this paper we present Vote With Your Feet, a hyperlocal public polling tool for urban screens allowing users to express their opinions. Similar to vox populi interviews on TV or polls on news websites, the tool is meant to reflect the mindset of the community on topics such as current affairs, cultural identity and local matters. It is novel in that it focuses on a situated civic discourse and provides a tangible user interface, tackling the mentioned challenges. It shows one Yes/No question at a time and enables users to vote by stepping on one of two tangible buttons on the ground. This user interface was introduced to attract people's attention and to lower participation barriers. Our field study showed that Vote With Your Feet is perceived as inviting and that it can spark discussions among co-located people.},
acmid = {2611015},
articleno = {44},
doi = {10.1145/2611009.2611015},
isbn = {978-1-4503-2952-1},
keywords = {Polling, civic engagement, public displays, tangible media, ubiquitous computing, urban computing, urban informatics, voting},
location = {Copenhagen, Denmark},
numpages = {6},
timestamp = {2014.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/steinberger2014perdis.pdf},
}
N. Broy, F. Alt, S. Schneegass, and B. Pfleging, “3D Displays in Cars: Exploring the User Performance for a Stereoscopic Instrument Cluster,” in Proceedings of the 6th International Conference on Automotive User Interfaces and Interactive Vehicular Applications, New York, NY, USA, 2014, pp. 2:1–2:9. doi:10.1145/2667317.2667319
[BibTeX] [Abstract] [Download PDF]
In this paper, we investigate user performance for stereoscopic automotive user interfaces (UI). Our work is motivated by the fact that stereoscopic displays are about to find their way into cars. Such a safety-critical application area creates an inherent need to understand how the use of stereoscopic 3D visualizations impacts user performance. We conducted a comprehensive study with 56 participants to investigate the impact of a 3D instrument cluster (IC) on primary and secondary task performance. We investigated different visualizations (2D and 3D) and complexities (low vs. high amount of details) of the IC as well as two 3D display technologies (shutter vs. autostereoscopy). As secondary tasks the participants judged spatial relations between UI elements (expected events) and reacted on pop-up instructions (unexpected events) in the IC. The results show that stereoscopy increases accuracy for expected events, decreases task completion times for unexpected tasks, and increases the attractiveness of the interface. Furthermore, we found a significant influence of the used technology, indicating that secondary task performance improves for shutter displays.
@InProceedings{broy2014autoui,
author = {Broy, Nora and Alt, Florian and Schneegass, Stefan and Pfleging, Bastian},
title = {3D Displays in Cars: Exploring the User Performance for a Stereoscopic Instrument Cluster},
booktitle = {Proceedings of the 6th International Conference on Automotive User Interfaces and Interactive Vehicular Applications},
year = {2014},
series = {AutomotiveUI '14},
pages = {2:1--2:9},
address = {New York, NY, USA},
publisher = {ACM},
note = {broy2014autoui},
abstract = {In this paper, we investigate user performance for stereoscopic automotive user interfaces (UI). Our work is motivated by the fact that stereoscopic displays are about to find their way into cars. Such a safety-critical application area creates an inherent need to understand how the use of stereoscopic 3D visualizations impacts user performance. We conducted a comprehensive study with 56 participants to investigate the impact of a 3D instrument cluster (IC) on primary and secondary task performance. We investigated different visualizations (2D and 3D) and complexities (low vs. high amount of details) of the IC as well as two 3D display technologies (shutter vs. autostereoscopy). As secondary tasks the participants judged spatial relations between UI elements (expected events) and reacted on pop-up instructions (unexpected events) in the IC. The results show that stereoscopy increases accuracy for expected events, decreases task completion times for unexpected tasks, and increases the attractiveness of the interface. Furthermore, we found a significant influence of the used technology, indicating that secondary task performance improves for shutter displays.},
acmid = {2667319},
articleno = {2},
doi = {10.1145/2667317.2667319},
isbn = {978-1-4503-3212-5},
keywords = {Automotive UIs, stereoscopic 3D, user performance},
location = {Seattle, WA, USA},
numpages = {9},
timestamp = {2014.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2014autoui.pdf},
}

2013

F. Alt, A Design Space for Pervasive Advertising on Public Displays, Stuttgart, Germany: Ph.D. Thesis, 2013.
[BibTeX] [Abstract] [Download PDF]
Today, people living in cities see up to 5000 ads per day and many of them are presented on public displays. More and more of these public displays are networked and equipped with various types of sensors, making them part of a global infrastructure that is currently emerging. Such networked and interactive public displays provide the opportunity to create a benefit for society in the form of immersive experiences and relevant content. In this way, they can overcome the display blindness that evolved among passersby over the years. We see two main reasons that prevent this vision from coming true: first, public displays are stuck with traditional advertising as the driving business model, making it difficult for novel, interactive applications to enter the scene. Second, no common ground exists for researchers or advertisers that outlines important challenges. The provider view and audience view need to be addressed to make open, interactive display networks successful. The main contribution made by this thesis is presenting a design space for advertising on public displays that identifies important challenges – mainly from a human-computer interaction perspective. Solutions to these core challenges are presented and evaluated, using empirical methods commonly applied in HCI. First, we look at challenges that arise from the shared use of display space. We conducted an observational study of traditional public notice areas that allowed us to identify different stakeholders, to understand their needs and motivations, to unveil current practices used to exercise control over the display, and to understand the interplay between space, stakeholders, and content. We present a set of design implications for open public display networks that we applied when implementing and evaluating a digital public notice area. Second, we tackle the challenge of making the user interact by taking a closer look at attracting attention, communicating interactivity, and enticing interaction. Attracting attention is crucial for any further action to happen. We present an approach that exploits gaze as a powerful input modality. By adapting content based on gaze, we are able to show a significant increase in attention and an effect on the user’s attitude. In order to communicate interactivity, we show that the mirror representation of the user is a powerful interactivity cue. Finally, in order to entice interaction, we show that the user needs to be motivated to interact and to understand how interaction works. Findings from our experiments reveal direct touch and the mobile phone as suitable interaction technologies. In addition, these findings suggest that relevance of content, privacy, and security have a strong influence on user motivation.
@Book{alt2013diss,
title = {{A Design Space for Pervasive Advertising on Public Displays}},
publisher = {Ph.D. Thesis},
year = {2013},
author = {Alt, Florian},
address = {Stuttgart, Germany},
note = {alt2013diss},
abstract = {Today, people living in cities see up to 5000 ads per day and many of them are presented on public displays. More and more of these public displays are networked and equipped with various types of sensors, making them part of a global infrastructure that is currently emerging. Such networked and interactive public displays provide the opportunity to create a benefit for society in the form of immersive experiences and relevant content. In this way, they can overcome the display blindness that evolved among passersby over the years. We see two main reasons that prevent this vision from coming true: first, public displays are stuck with traditional advertising as the driving business model, making it difficult for novel, interactive applications to enter the scene. Second, no common ground exists for researchers or advertisers that outlines important challenges. The provider view and audience view need to be addressed to make open, interactive display networks successful. The main contribution made by this thesis is presenting a design space for advertising on public displays that identifies important challenges – mainly from a human-computer interaction perspective. Solutions to these core challenges are presented and evaluated, using empirical methods commonly applied in HCI. First, we look at challenges that arise from the shared use of display space. We conducted an observational study of traditional public notice areas that allowed us to identify different stakeholders, to understand their needs and motivations, to unveil current practices used to exercise control over the display, and to understand the interplay between space, stakeholders, and content. We present a set of design implications for open public display networks that we applied when implementing and evaluating a digital public notice area. Second, we tackle the challenge of making the user interact by taking a closer look at attracting attention, communicating interactivity, and enticing interaction. Attracting attention is crucial for any further action to happen. We present an approach that exploits gaze as a powerful input modality. By adapting content based on gaze, we are able to show a significant increase in attention and an effect on the user’s attitude. In order to communicate interactivity, we show that the mirror representation of the user is a powerful interactivity cue. Finally, in order to entice interaction, we show that the user needs to be motivated to interact and to understand how interaction works. Findings from our experiments reveal direct touch and the mobile phone as suitable interaction technologies. In addition, these findings suggest that relevance of content, privacy, and security have a strong influence on user motivation.},
timestamp = {2013.12.07},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2013diss.pdf},
}
F. Alt and B. Pfleging, “Sonify – A Platform for the Sonification of Text Messages,” in Proceedings of Mensch & Computer 2013, 2013.
[BibTeX] [Abstract] [Download PDF]
Sonification of text messages offers a great potential for personalization while at the same time allowing rich information to be mediated. For example, ringtones are the major form of personalization on smartphones besides apps and background images. Ringtones are often used as a form of self-expression by the smartphone owner (e.g., using one’s favorite sound track as standard ringtone), but also to identify the caller or sender of a message (e.g., the user knows who is calling without taking the phone out of the pocket). We believe this approach to be applicable to a wide variety of text messages, such as SMS, email, or IM. In this paper, we first present a web-based platform that allows user-generated mappings for text sonification to be created and managed. An API enables any application to send a text message and receive the sonification in the form of a MIDI file. To showcase the potential, we implemented an Android app that sonifies incoming SMS. Second, we evaluate the feasibility of our approach and show that sonified messages are equally effective as ringtones when conveying meta information.
@InProceedings{alt2013muc,
author = {Florian Alt AND Bastian Pfleging},
title = {{Sonify -- A Platform for the Sonification of Text Messages}},
booktitle = {{Proceedings of Mensch \& Computer 2013}},
year = {2013},
note = {alt2013muc},
abstract = {Sonification of text messages offers a great potential for personalization while at the same time allowing rich information to be mediated. For example, ringtones are the major form of personalization on smartphones besides apps and background images. Ringtones are often used as a form of self-expression by the smartphone owner (e.g., using one’s favorite sound track as standard ringtone), but also to identify the caller or sender of a message (e.g., the user knows who is calling without taking the phone out of the pocket). We believe this approach to be applicable to a wide variety of text messages, such as SMS, email, or IM. In this paper, we first present a web-based platform that allows user-generated mappings for text sonification to be created and managed. An API enables any application to send a text message and receive the sonification in the form of a MIDI file. To showcase the potential, we implemented an Android app that sonifies incoming SMS. Second, we evaluate the feasibility of our approach and show that sonified messages are equally effective as ringtones when conveying meta information.},
owner = {florianalt},
timestamp = {2013.10.04},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2013muc.pdf},
}
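The platform’s API is only characterized at a high level here (text in, MIDI out). The fetch-based client below is a guess at what calling such a service could look like; the URL, route, and JSON field names are placeholders, not Sonify’s actual endpoints.

// Hypothetical client for a text-to-MIDI sonification service.
// URL, route, and JSON fields are placeholders, not Sonify's real API.

async function sonifyMessage(text: string, sender: string): Promise<ArrayBuffer> {
  const response = await fetch("https://example.org/sonify/api/messages", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ text, sender }),      // message plus metadata to map
  });
  if (!response.ok) throw new Error(`Sonification failed: ${response.status}`);
  return response.arrayBuffer();                 // MIDI file bytes
}

// Example: fetch the sonification for an incoming message. Playing the MIDI
// bytes would additionally require a sequencer/synthesizer library.
sonifyMessage("Running late, see you at 8!", "alice@example.org")
  .then((midi) => console.log(`Received ${midi.byteLength} bytes of MIDI`));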
F. Alt and S. Schneegaß, “Towards Understanding the Cognitive Effects of Interactivity,” in Proceedings of the 1st Workshop on Experiencing Interactivity in Public Space (EIPS), 2013.
[BibTeX] [Abstract] [Download PDF]
Cheap and easy-to-deploy consumer hardware, such as the Microsoft Kinect, touch screens, and smartphones drive an increasing proliferation of public space with interactive applications. Such applications include artistic, playful, and informative content on public displays. Though such applications are in general positively perceived by users, their benefit is in many cases not clear. In this paper we argue that while most current (advertising) content on public displays aims at stimulating user action (e.g., making a purchase), interactive applications are also suitable to support cognition. In our work, we focus on awareness as one particular form of cognition and assess it by measuring recall and recognition. This is not only interesting for advertising but for any type of applications that requires the user to remember information. We contribute a design space and map out directions for future research.
@InProceedings{alt2013eips,
author = {Florian Alt AND Stefan Schneega\ss},
title = {Towards Understanding the Cognitive Effects of Interactivity},
booktitle = {Proceedings of the 1st Workshop on Experiencing Interactivity in Public Space (EIPS)},
year = {2013},
series = {EIPS'13},
note = {alt2013eips},
abstract = {Cheap and easy-to-deploy consumer hardware, such as the Microsoft Kinect, touch screens, and smartphones drive an increasing proliferation of public space with interactive applications. Such applications include artistic, playful, and informative content on public displays. Though such applications are in general positively perceived by users, their benefit is in many cases not clear. In this paper we argue that while most current (advertising) content on public displays aims at stimulating user action (e.g., making a purchase), interactive applications are also suitable to support cognition. In our work, we focus on awareness as one particular form of cognition and assess it by measuring recall and recognition. This is not only interesting for advertising but for any type of applications that requires the user to remember information. We contribute a design space and map out directions for future research.},
owner = {florianalt},
timestamp = {2013.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2013eips.pdf},
}
F. Alt, S. Schneegass, M. Girgis, and A. Schmidt, “Cognitive Effects of Interactive Public Display Applications,” in Proceedings of the 2nd ACM International Symposium on Pervasive Displays, New York, NY, USA, 2013, pp. 13–18. doi:10.1145/2491568.2491572
[BibTeX] [Abstract] [Download PDF]
Many public displays are nowadays equipped with different types of sensors. Such displays allow engaging and persistent user experiences to be created, e.g., in the form of gesture-controlled games or content exploration using direct touch at the display. However, as digital displays replace traditional posters and billboards, display owners are reluctant to deploy interactive content, but rather adapt traditional, non-interactive content. The main reason is, that the benefit of such interactive deployments is not obvious. Our hypothesis is that interactivity has a cognitive effect on users and therefore increases the ability to remember what they have seen on the screen – which is beneficial both for the display owner and the user. In this paper we systematically investigate the impact of interactive content on public displays on the users’ cognition in different situations. Our findings indicate that overall memorability is positively affected as users interact. Based on these findings we discuss design implications for interactive public displays.
@InProceedings{alt2013perdis,
author = {Alt, Florian and Schneegass, Stefan and Girgis, Michael and Schmidt, Albrecht},
title = {Cognitive effects of interactive public display applications},
booktitle = {Proceedings of the 2nd ACM International Symposium on Pervasive Displays},
year = {2013},
series = {PerDis '13},
pages = {13--18},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2013perdis},
abstract = {Many public displays are nowadays equipped with different types of sensors. Such displays allow engaging and persistent user experiences to be created, e.g., in the form of gesture-controlled games or content exploration using direct touch at the display. However, as digital displays replace traditional posters and billboards, display owners are reluctant to deploy interactive content, but rather adapt traditional, non-interactive content. The main reason is that the benefit of such interactive deployments is not obvious. Our hypothesis is that interactivity has a cognitive effect on users and therefore increases the ability to remember what they have seen on the screen -- which is beneficial both for the display owner and the user. In this paper we systematically investigate the impact of interactive content on public displays on the users' cognition in different situations. Our findings indicate that overall memorability is positively affected as users interact. Based on these findings we discuss design implications for interactive public displays.},
acmid = {2491572},
doi = {10.1145/2491568.2491572},
isbn = {978-1-4503-2096-2},
keywords = {digital signage, interactivity, public display, recall, recognition},
location = {Mountain View, California},
numpages = {6},
owner = {florianalt},
timestamp = {2013.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2013perdis.pdf},
}
F. Alt, A. S. Shirazi, T. Kubitza, and A. Schmidt, “Interaction techniques for creating and exchanging content with public displays,” in Proceedings of the SIGCHI Conference on Human Factors in Computing Systems, New York, NY, USA, 2013, pp. 1709–1718. doi:10.1145/2470654.2466226
[BibTeX] [Abstract] [Download PDF]
Falling hardware prices and ever more displays being connected to the Internet will lead to large public display networks, potentially forming a novel communication medium. We envision that such networks are not restricted to display owners and advertisers anymore, but allow also passersby (e.g., customers) to exchange content, similar to traditional public notice areas, such as bulletin boards. In this context it is crucial to understand emerging practices and provide easy and straightforward interaction techniques to be used for creating and exchanging content. In this paper, we present Digifieds, a digital public notice area we built to investigate and compare possible interaction techniques. Based on a lab study we show that using direct touch at the display as well as using the mobile phone as a complementing interaction technology are most suitable. Direct touch at the display closely resembles the interaction known from classic bulletin boards and provides the highest usability. Mobile phones preserve the users’ privacy as they exchange (sensitive) data with the display and at the same time allow content to be created on-the-go or to be retrieved.
@InProceedings{alt2013chi,
author = {Alt, Florian and Shirazi, Alireza Sahami and Kubitza, Thomas and Schmidt, Albrecht},
title = {Interaction techniques for creating and exchanging content with public displays},
booktitle = {Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
year = {2013},
series = {CHI '13},
pages = {1709--1718},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2013chi},
abstract = {Falling hardware prices and ever more displays being connected to the Internet will lead to large public display networks, potentially forming a novel communication medium. We envision that such networks are not restricted to display owners and advertisers anymore, but allow also passersby (e.g., customers) to exchange content, similar to traditional public notice areas, such as bulletin boards. In this context it is crucial to understand emerging practices and provide easy and straightforward interaction techniques to be used for creating and exchanging content. In this paper, we present Digifieds, a digital public notice area we built to investigate and compare possible interaction techniques. Based on a lab study we show that using direct touch at the display as well as using the mobile phone as a complementing interaction technology are most suitable. Direct touch at the display closely resembles the interaction known from classic bulletin boards and provides the highest usability. Mobile phones preserve the users' privacy as they exchange (sensitive) data with the display and at the same time allow content to be created on-the-go or to be retrieved.},
acmid = {2466226},
doi = {10.1145/2470654.2466226},
isbn = {978-1-4503-1899-0},
keywords = {classified ads, digifieds, interaction, public displays},
location = {Paris, France},
numpages = {10},
owner = {florianalt},
timestamp = {2013.06.12},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2013chi.pdf},
}
N. Broy, F. Alt, S. Schneegass, N. Henze, and A. Schmidt, “Perceiving layered information on 3d displays using binocular disparity,” in Proceedings of the 2nd ACM International Symposium on Pervasive Displays, New York, NY, USA, 2013, pp. 61–66. doi:10.1145/2491568.2491582
[BibTeX] [Abstract] [Download PDF]
3D displays are hitting the mass market. They are integrated in consumer TVs, notebooks, and mobile phones and are mainly used for virtual reality as well as video content. We see large potential in using depth also for structuring information. Our specific use case is 3D displays integrated in cars. The capabilities of such displays could be used to present relevant information to the driver in a fast and easy-to-understand way, e.g., by functionality-based clustering. However, excessive parallaxes can cause discomfort and in turn negatively influence the primary driving task. This requires a reasonable choice of parallax boundaries. The contribution of this paper is twofold. First, we identify the comfort zone when perceiving 3D content. Second, we determine a minimum depth distance between objects that still enables users to quickly and accurately separate the two depth planes. The results yield that in terms of task completion time the optimum distance from screen level is up to 35.9 arc-min angular disparity behind the screen plane. A distance of at least 2.7 arc-min difference in angular disparity between the objects significantly decreases time for layer separation. Based on the results we derive design implications.
@InProceedings{broy2013perdis,
author = {Broy, Nora and Alt, Florian and Schneegass, Stefan and Henze, Niels and Schmidt, Albrecht},
title = {Perceiving layered information on 3D displays using binocular disparity},
booktitle = {Proceedings of the 2nd ACM International Symposium on Pervasive Displays},
year = {2013},
series = {PerDis '13},
pages = {61--66},
address = {New York, NY, USA},
publisher = {ACM},
note = {broy2013perdis},
abstract = {3D displays are hitting the mass market. They are integrated in consumer TVs, notebooks, and mobile phones and are mainly used for virtual reality as well as video content. We see large potential in using depth also for structuring information. Our specific use case is 3D displays integrated in cars. The capabilities of such displays could be used to present relevant information to the driver in a fast and easy-to-understand way, e.g., by functionality-based clustering. However, excessive parallaxes can cause discomfort and in turn negatively influence the primary driving task. This requires a reasonable choice of parallax boundaries. The contribution of this paper is twofold. First, we identify the comfort zone when perceiving 3D content. Second, we determine a minimum depth distance between objects that still enables users to quickly and accurately separate the two depth planes. The results yield that in terms of task completion time the optimum distance from screen level is up to 35.9 arc-min angular disparity behind the screen plane. A distance of at least 2.7 arc-min difference in angular disparity between the objects significantly decreases time for layer separation. Based on the results we derive design implications.},
acmid = {2491582},
doi = {10.1145/2491568.2491582},
isbn = {978-1-4503-2096-2},
keywords = {3D displays, automotive user interfaces, human factors},
location = {Mountain View, California},
numpages = {6},
owner = {florianalt},
timestamp = {2013.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/broy2013perdis.pdf},
}
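The comfort and separation thresholds in the abstract are given in arc-minutes of angular disparity. To relate them to physical depth, the standard stereo-viewing geometry can be used; this is a sketch under assumed viewing parameters, not notation from the paper (interocular distance $e$, viewing distance $D$, depth offset $\Delta z$ behind the screen):

\[
\eta = 2\arctan\!\left(\frac{e}{2D}\right) - 2\arctan\!\left(\frac{e}{2(D+\Delta z)}\right) \approx \frac{e\,\Delta z}{D\,(D+\Delta z)} \quad \text{(small-angle approximation, in radians)}
\]

As a worked example, assuming $e = 65$ mm and $D = 600$ mm, the reported optimum of 35.9 arc-min ($\approx 0.0104$ rad) corresponds to a depth plane roughly 64 mm behind the screen.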
R. José, J. Cardoso, F. Alt, S. Clinch, and N. Davies, “Mobile applications for open display networks: common design considerations,” in Proceedings of the 2nd ACM International Symposium on Pervasive Displays, New York, NY, USA, 2013, pp. 97–102. doi:10.1145/2491568.2491590
[BibTeX] [Abstract] [Download PDF]
Mobile devices can be a powerful tool for interaction with public displays, but mobile applications supporting this form of interaction are not yet part of our everyday reality. There are no widely accepted abstractions, standards, or practices that may enable systematic interaction between mobile devices and public displays. We envision public displays to move away from a world of closed display networks to scenarios where mobile applications could allow people to interact with the myriad of displays they might encounter during their everyday trips. In this research, we study the key processes involved in this collaborative interaction between public shared displays and mobile applications. Based on the lessons learned from our own development and deployment of 3 applications, and also on the analysis of the interactive features described in the literature, we have identified 8 key processes that may shape this form of interaction: Discovery, Association, Presence Management, Exploration, Interface Migration, Controller, Media Upload and Media Download. The contribution of this work is the identification of these high-level processes and an elicitation of the main design considerations for display networks.
@InProceedings{jose2013perdis,
author = {Jos{\'e}, Rui and Cardoso, Jorge and Alt, Florian and Clinch, Sarah and Davies, Nigel},
title = {Mobile applications for open display networks: common design considerations},
booktitle = {Proceedings of the 2nd ACM International Symposium on Pervasive Displays},
year = {2013},
series = {PerDis '13},
pages = {97--102},
address = {New York, NY, USA},
publisher = {ACM},
note = {jose2013perdis},
abstract = {Mobile devices can be a powerful tool for interaction with public displays, but mobile applications supporting this form of interaction are not yet part of our everyday reality. There are no widely accepted abstractions, standards, or practices that may enable systematic interaction between mobile devices and public displays. We envision public displays to move away from a world of closed display networks to scenarios where mobile applications could allow people to interact with the myriad of displays they might encounter during their everyday trips. In this research, we study the key processes involved in this collaborative interaction between public shared displays and mobile applications. Based on the lessons learned from our own development and deployment of 3 applications, and also on the analysis of the interactive features described in the literature, we have identified 8 key processes that may shape this form of interaction: Discovery, Association, Presence Management, Exploration, Interface Migration, Controller, Media Upload and Media Download. The contribution of this work is the identification of these high-level processes and an elicitation of the main design considerations for display networks.},
acmid = {2491590},
doi = {10.1145/2491568.2491590},
isbn = {978-1-4503-2096-2},
keywords = {mobile applications, open display networks, public displays},
location = {Mountain View, California},
numpages = {6},
owner = {florianalt},
timestamp = {2013.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/jose2013perdis.pdf},
}
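The eight processes lend themselves to an explicit representation in code. A minimal sketch of how a mobile client might track them as states (the enum names follow the abstract; the class itself and the example are invented for illustration, not from the paper):

from enum import Enum, auto

class DisplayInteractionProcess(Enum):
    """High-level processes between mobile apps and public displays."""
    DISCOVERY = auto()            # finding displays the user encounters
    ASSOCIATION = auto()          # pairing the phone with a chosen display
    PRESENCE_MANAGEMENT = auto()  # announcing or withdrawing the user's presence
    EXPLORATION = auto()          # browsing what the display offers
    INTERFACE_MIGRATION = auto()  # moving UI elements between phone and display
    CONTROLLER = auto()           # using the phone as a remote control
    MEDIA_UPLOAD = auto()         # pushing content to the display
    MEDIA_DOWNLOAD = auto()       # pulling content from the display

# Example: a client session typically starts with discovery.
current = DisplayInteractionProcess.DISCOVERY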
N. Memarovic, K. Cheverst, M. Langheinrich, I. Elhart, and F. Alt, “Tethered or free to roam: the design space of limiting content access on community displays,” in Proceedings of the 2nd ACM International Symposium on Pervasive Displays, New York, NY, USA, 2013, pp. 127–132. doi:10.1145/2491568.2491596
[BibTeX] [Abstract] [Download PDF]
Many design decisions need to be made when creating situated public displays that aim to serve a community. One such decision concerns access to its contents: should users be able to access content remotely, e.g., via a web page, or should this be limited to users who are co-located with the display? A similar decision has to be made for community content upload: do posters need to be co-located with the display or can posts be made from any location? In other words, content display and creation can be ‘tethered’ to a display or it can be ‘free to roam’, i.e., accessible from anywhere. In this paper we analyze prior community display deployments in an attempt to explore this space and produce a taxonomy that highlights the inherent design choices. Furthermore, we discuss some of the reasons that may underlie these choices and identify opportunities for design.
@InProceedings{memarovic2013perdis,
author = {Memarovic, Nemanja and Cheverst, Keith and Langheinrich, Marc and Elhart, Ivan and Alt, Florian},
title = {Tethered or free to roam: the design space of limiting content access on community displays},
booktitle = {Proceedings of the 2nd ACM International Symposium on Pervasive Displays},
year = {2013},
series = {PerDis '13},
pages = {127--132},
address = {New York, NY, USA},
publisher = {ACM},
note = {memarovic2013perdis},
abstract = {Many design decisions need to be made when creating situated public displays that aim to serve a community. One such decision concerns access to its contents: should users be able to access content remotely, e.g., via a web page, or should this be limited to users who are co-located with the display? A similar decision has to be made for community content upload: do posters need to be co-located with the display or can posts be made from any location? In other words, content display and creation can be 'tethered' to a display or it can be 'free to roam', i.e., accessible from anywhere. In this paper we analyze prior community display deployments in an attempt to explore this space and produce a taxonomy that highlights the inherent design choices. Furthermore, we discuss some of the reasons that may underlie these choices and identify opportunities for design.},
acmid = {2491596},
doi = {10.1145/2491568.2491596},
isbn = {978-1-4503-2096-2},
keywords = {collocation, communities, content, public displays},
location = {Mountain View, California},
numpages = {6},
owner = {florianalt},
timestamp = {2013.06.11},
url = {http://www.florian-alt.org/unibw/wp-content/publications/memarovic2013perdis.pdf},
}
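The taxonomy reduces to two independent binary choices, so the design space can be written down directly. A minimal sketch (class and example names are invented for illustration):

from dataclasses import dataclass

@dataclass(frozen=True)
class CommunityDisplayDesign:
    """Each axis is either 'tethered' (requires co-location with the
    display) or 'free to roam' (accessible from anywhere)."""
    viewing_tethered: bool  # must viewers be at the display to see content?
    posting_tethered: bool  # must posters be at the display to upload content?

# Two of the four quadrants: a classic bulletin board vs. a web-mirrored display.
bulletin_board = CommunityDisplayDesign(viewing_tethered=True, posting_tethered=True)
web_mirrored = CommunityDisplayDesign(viewing_tethered=False, posting_tethered=False)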
N. Memarovic, M. Langheinrich, K. Cheverst, N. Taylor, and F. Alt, “P-LAYERS – a layered framework addressing the multifaceted issues facing community-supporting public display deployments,” ACM Trans. Comput.-Hum. Interact., vol. 20, iss. 3, pp. 17:1–17:34, 2013. doi:10.1145/2491500.2491505
[BibTeX] [Abstract] [Download PDF]
The proliferation of digital signage systems has prompted a wealth of research that attempts to use public displays for more than just advertisement or transport schedules, such as their use for supporting communities. However, deploying and maintaining display systems “in the wild” that can support communities is challenging. Based on the authors’ experiences in designing and fielding a diverse range of community-supporting public display deployments, we identify a large set of challenges and issues that researchers working in this area are likely to encounter. Grouping them into five distinct layers – (1) hardware, (2) system architecture, (3) content, (4) system interaction, and (5) community interaction design – we draw up the P-LAYERS framework to enable a more systematic appreciation of the diverse range of issues associated with the development, the deployment, and the maintenance of such systems. Using three of our own deployments as illustrative examples, we will describe both our experiences within each individual layer, as well as point out interactions between the layers. We believe our framework provides a valuable aid for researchers looking to work in this space, alerting them to the issues they are likely to encounter during their deployments, and help them plan accordingly.
@Article{memarovic2013tochi,
author = {Memarovic, Nemanja and Langheinrich, Marc and Cheverst, Keith and Taylor, Nick and Alt, Florian},
title = {P-LAYERS -- A Layered Framework Addressing the Multifaceted Issues Facing Community-Supporting Public Display Deployments},
journal = {ACM Trans. Comput.-Hum. Interact.},
year = {2013},
volume = {20},
number = {3},
pages = {17:1--17:34},
month = jul,
issn = {1073-0516},
note = {memarovic2013tochi},
abstract = {The proliferation of digital signage systems has prompted a wealth of research that attempts to use public displays for more than just advertisement or transport schedules, such as their use for supporting communities. However, deploying and maintaining display systems “in the wild” that can support communities is challenging. Based on the authors’ experiences in designing and fielding a diverse range of community-supporting public display deployments, we identify a large set of challenges and issues that researchers working in this area are likely to encounter. Grouping them into five distinct layers -- (1) hardware, (2) system architecture, (3) content, (4) system interaction, and (5) community interaction design -- we draw up the P-LAYERS framework to enable a more systematic appreciation of the diverse range of issues associated with the development, the deployment, and the maintenance of such systems. Using three of our own deployments as illustrative examples, we will describe both our experiences within each individual layer, as well as point out interactions between the layers. We believe our framework provides a valuable aid for researchers looking to work in this space, alerting them to the issues they are likely to encounter during their deployments, and help them plan accordingly.},
acmid = {2491505},
address = {New York, NY, USA},
articleno = {17},
doi = {10.1145/2491500.2491505},
issue_date = {July 2013},
keywords = {Community interaction, community needs, pervasive displays, public displays},
numpages = {34},
publisher = {ACM},
timestamp = {2013.06.17},
url = {http://www.florian-alt.org/unibw/wp-content/publications/memarovic2013tochi.pdf},
}
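Since the five layers are ordered from infrastructure up to community concerns, they can be captured as a simple ordered enumeration. A sketch (layer names from the abstract; the tagging example is invented):

from enum import Enum

class PLayer(Enum):
    """The five layers of the P-LAYERS framework, ordered bottom-up."""
    HARDWARE = 1                      # screens, sensors, networking
    SYSTEM_ARCHITECTURE = 2           # software infrastructure and scheduling
    CONTENT = 3                       # what is shown and where it comes from
    SYSTEM_INTERACTION = 4            # how users operate the deployment
    COMMUNITY_INTERACTION_DESIGN = 5  # how the deployment engages its community

# Example: classifying an issue observed in the field by the layer it belongs to.
issue = ("display intermittently offline", PLayer.HARDWARE)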
M. Pfeiffer, S. Schneegaß, and F. Alt, “Supporting Interaction in Public Space with Electrical Muscle Stimulation,” in Adjunct Proceedings of the 2013 ACM International Joint Conference on Pervasive and Ubiquitous Computing, 2013.
[BibTeX] [Abstract] [Download PDF]
As displays in public space are augmented with sensors, such as the Kinect, they enable passersby to interact with the content on the screen. As of today, feedback on the user action in such environments is usually limited to the visual channel. However, we believe that more immediate and intense forms, in particular haptic feedback, do not only increase the user experience, but may also have a strong impact on user attention and memorization of the content encountered during the interaction. Haptic feedback can today be achieved through vibration on the mobile phone, which is strongly dependent on the location of the device. We envision that fabrics, such as underwear, can in the future be equipped with electrical muscle stimulation, thus providing a more natural and direct way of haptic feedback. In this demo we aim to showcase the potential of applying electrical muscle stimulation as direct haptic feedback during interaction in public spaces in the context of a Kinect-based game for public displays.
@InProceedings{pfeiffer2013ubicompadj,
author = {Max Pfeiffer AND Stefan Schneega\ss AND Florian Alt},
title = {{Supporting Interaction in Public Space with Electrical Muscle Stimulation}},
booktitle = {{Adjunct Proceedings of the 2013 ACM International Joint Conference on Pervasive and Ubiquitous Computing}},
year = {2013},
series = {Ubicomp'13},
publisher = {ACM Press},
note = {pfeiffer2013ubicompadj},
abstract = {As displays in public space are augmented with sensors, such as the Kinect, they enable passersby to interact with the content on the screen. As of today, feedback on the user action in such environments is usually limited to the visual channel. However, we believe that more immediate and intense forms, in particular haptic feedback, do not only increase the user experience, but may also have a strong impact on user attention and memorization of the content encountered during the interaction. Haptic feedback can today be achieved through vibration on the mobile phone, which is strongly dependent on the location of the device. We envision that fabrics, such as underwear, can in the future be equipped with electrical muscle stimulation, thus providing a more natural and direct way of haptic feedback. In this demo we aim to showcase the potential of applying electrical muscle stimulation as direct haptic feedback during interaction in public spaces in the context of a Kinect-based game for public displays.},
owner = {florianalt},
timestamp = {2013.09.20},
url = {http://www.florian-alt.org/unibw/wp-content/publications/pfeiffer2013ubicompadj.pdf},
}
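The feedback loop the demo describes — game event in, muscle stimulation out — can be pictured with a short sketch. All APIs below are hypothetical placeholders (the abstract does not describe the hardware interface), and real EMS hardware requires per-user calibration and medical safety limits:

import time

class EmsDevice:
    """Hypothetical stand-in for an EMS control interface."""
    MAX_INTENSITY = 0.4  # hard cap; real devices need per-user calibration

    def pulse(self, intensity: float, duration_s: float) -> None:
        intensity = min(intensity, self.MAX_INTENSITY)
        print(f"EMS pulse: intensity={intensity:.2f} for {duration_s:.2f}s")
        time.sleep(duration_s)

def on_game_event(ems: EmsDevice, event: str) -> None:
    # Map game events (e.g., from Kinect skeleton tracking) to haptic feedback.
    feedback = {"ball_hit": (0.3, 0.15), "level_won": (0.4, 0.40)}
    if event in feedback:
        intensity, duration = feedback[event]
        ems.pulse(intensity, duration)

on_game_event(EmsDevice(), "ball_hit")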

2012

F. Alt, “Digitale Schwarze Bretter,” in Media Cultures, S. Pop, U. Stalder, G. Tscherteu, and M. Struppek, Eds., Av Edition, 2012, pp. 317–321.
[BibTeX] [Download PDF]
@InBook{alt2012mediacultures1d,
chapter = {Digital Black Boards (english)},
pages = {317--321},
title = {{Digitale Schwarze Bretter}},
publisher = {Av Edition},
year = {2012},
author = {Florian Alt},
editor = {Susa Pop AND Ursula Stalder AND Gernot Tscherteu AND Mirjam Struppek},
month = {may},
note = {alt2012mediacultures1d},
booktitle = {Media Cultures},
owner = {flo},
timestamp = {2012.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012mediacultures1d.pdf},
}
F. Alt, “Digital black boards,” in Media Cultures, S. Pop, U. Stalder, G. Tscherteu, and M. Struppek, Eds., Av Edition, 2012, pp. 117–120.
[BibTeX] [Download PDF]
@InBook{alt2012mediacultures1e,
chapter = {Digital Black Boards (english)},
pages = {117--120},
title = {Digital Black Boards},
publisher = {Av Edition},
year = {2012},
author = {Florian Alt},
editor = {Susa Pop AND Ursula Stalder AND Gernot Tscherteu AND Mirjam Struppek},
month = {may},
note = {alt2012mediacultures1e},
booktitle = {Media Cultures},
owner = {flo},
timestamp = {2012.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012mediacultures1e.pdf},
}
F. Alt, J. Müller, and A. Schmidt, “Advertising on Public Display Networks,” IEEE Computer, vol. 45, iss. 5, pp. 50–56, 2012.
[BibTeX] [Abstract] [Download PDF]
For advertising-based public display networks to become truly pervasive, they must provide a tangible social benefit and be engaging without being obtrusive, blending advertisements with informative content.
@Article{alt2012computer,
author = {Florian Alt and J{\"o}rg M{\"u}ller and Albrecht Schmidt},
title = {{Advertising on Public Display Networks}},
journal = {IEEE Computer},
year = {2012},
volume = {45},
number = {5},
pages = {50-56},
month = {may},
note = {alt2012computer},
abstract = {For advertising-based public display networks to become truly pervasive, they must provide a tangible social benefit and be engaging without being obtrusive, blending advertisements with informative content.},
bibsource = {DBLP, http://dblp.uni-trier.de},
ee = {http://doi.ieeecomputersociety.org/10.1109/MC.2012.150},
timestamp = {2012.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012computer.pdf},
}
F. Alt, D. Michelis, and J. Müller, “Pervasive advertising – Technologien, Konzepte, Herausforderungen,” in Media Cultures, S. Pop, U. Stalder, G. Tscherteu, and M. Struppek, Eds., Av Edition, 2012, pp. 331–338.
[BibTeX] [Download PDF]
@InBook{alt2012mediacultures2d,
chapter = {Pervasive Advertising Technologies (english)},
pages = {331--338},
title = {Pervasive Advertising -- Technologien, Konzepte, Herausforderungen},
publisher = {Av Edition},
year = {2012},
author = {Florian Alt AND Daniel Michelis AND J\"{o}rg M\"{u}ller},
editor = {Susa Pop AND Ursula Stalder AND Gernot Tscherteu AND Mirjam Struppek},
month = {may},
note = {alt2012mediacultures2d},
booktitle = {Media Cultures},
owner = {flo},
timestamp = {2012.05.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012mediacultures2d.pdf},
}
F. Alt, D. Michelis, and J. Müller, “Pervasive advertising technologies,” in Media Cultures, S. Pop, U. Stalder, G. Tscherteu, and M. Struppek, Eds., Av Edition, 2012, pp. 121–128.
[BibTeX] [Download PDF]
@InBook{alt2012mediacultures2e,
chapter = {Pervasive Advertising Technologies (english)},
pages = {121--128},
title = {Pervasive Advertising Technologies},
publisher = {Av Edition},
year = {2012},
author = {Florian Alt AND Daniel Michelis AND J\"{o}rg M\"{u}ller},
editor = {Susa Pop AND Ursula Stalder AND Gernot Tscherteu AND Mirjam Struppek},
month = {may},
note = {alt2012mediacultures2e},
booktitle = {Media Cultures},
owner = {flo},
timestamp = {2012.05.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012mediacultures2e.pdf},
}
F. Alt, S. Schneegaß, A. Schmidt, J. Müller, and N. Memarovic, “How to Evaluate Public Displays,” in Proceedings of the 2012 International Symposium on Pervasive Displays, New York, NY, USA, 2012, pp. 171–176. doi:10.1145/2307798.2307815
[BibTeX] [Abstract] [Download PDF]
After years in the lab, interactive public displays are finding their way into public spaces, shop windows, and public institutions. They are equipped with a multitude of sensors as well as (multi-) touch surfaces allowing not only the audience to be sensed, but also their effectiveness to be measured. The lack of generally accepted design guidelines for public displays and the fact that there are many different objectives (e.g., increasing attention, optimizing interaction times, finding the best interaction technique) make it a challenging task to pick the most suitable evaluation method. Based on a literature survey and our own experiences, this paper provides an overview of study types, paradigms, and methods for evaluation both in the lab and in the real world. Following a discussion of design challenges, we provide a set of guidelines for researchers and practitioners alike to be applied when evaluating public displays.
@InProceedings{alt2012perdis,
author = {Alt, Florian and Schneega\ss, Stefan and Schmidt, Albrecht and M\"{u}ller, J\"{o}rg and Memarovic, Nemanja},
title = {{How to Evaluate Public Displays}},
booktitle = {Proceedings of the 2012 International Symposium on Pervasive Displays},
year = {2012},
series = {PerDis'12},
pages = {171--176},
address = {New York, NY, USA},
month = {jun},
publisher = {ACM},
note = {alt2012perdis},
abstract = {After years in the lab, interactive public displays are finding their way into public spaces, shop windows, and public institutions. They are equipped with a multitude of sensors as well as (multi-) touch surfaces allowing not only the audience to be sensed, but also their effectiveness to be measured. The lack of generally accepted design guidelines for public displays and the fact that there are many different objectives (e.g., increasing attention, optimizing interaction times, finding the best interaction technique) make it a challenging task to pick the most suitable evaluation method. Based on a literature survey and our own experiences, this paper provides an overview of study types, paradigms, and methods for evaluation both in the lab and in the real world. Following a discussion of design challenges, we provide a set of guidelines for researchers and practitioners alike to be applied when evaluating public displays.},
acmid = {2307815},
articleno = {17},
doi = {10.1145/2307798.2307815},
isbn = {978-1-4503-1414-5},
keywords = {digital signage, evaluation, methods, public displays},
location = {Porto, Portugal},
numpages = {6},
timestamp = {2012.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012perdis.pdf},
}
F. Alt and S. Schneegass, “A conceptual architecture for pervasive advertising in public display networks,” in Proceedings of the 3rd Workshop on Infrastructure and Design Challenges of Coupled Display Visual Interfaces, 2012.
[BibTeX] [Abstract] [Download PDF]
This paper presents a conceptual architecture for pervasive advertising on public displays. It can help researchers and practitioners to inform the design of future display networks. Due to falling hardware prices we see a strong proliferation of (public) places with displays and it is not only large outdoor advertisers anymore operating them. However, public displays currently fail to attract the attention of the user – a challenge that could be overcome by networking displays and deploying sensors that allow novel interaction techniques and engaging user experiences to be created. One major question is how to design an appropriate infrastructure that caters to the conflicting needs of the involved stakeholders. Users want interesting content and their privacy being respected, advertisers want to gather the user’s data, and display owners want to be in control of the content as they fund the infrastructure. We identify the core components and discuss how control can be appropriately distributed among stakeholders by presenting three different forms of the architecture (user-centered, advertiser-centered, trusted).
@InProceedings{alt2012ppd,
author = {Florian Alt AND Stefan Schneegass},
title = {A Conceptual Architecture for Pervasive Advertising in Public Display Networks},
booktitle = {Proceedings of the 3rd Workshop on Infrastructure and Design Challenges of Coupled Display Visual Interfaces},
year = {2012},
series = {PPD'12},
month = jun,
note = {alt2012ppd},
abstract = {This paper presents a conceptual architecture for pervasive advertising on public displays. It can help researchers and practitioners to inform the design of future display networks. Due to falling hardware prices we see a strong proliferation of (public) places with displays and it is not only large outdoor advertisers anymore operating them. However, public displays currently fail to attract the attention of the user – a challenge that could be overcome by networking displays and deploying sensors that allow novel interaction techniques and engaging user experiences to be created. One major question is how to design an appropriate infrastructure that caters to the conflicting needs of the involved stakeholders. Users want interesting content and their privacy being respected, advertisers want to gather the user’s data, and display owners want to be in control of the content as they fund the infrastructure. We identify the core components and discuss how control can be appropriately distributed among stakeholders by presenting three different forms of the architecture (user-centered, advertiser-centered, trusted).},
timestamp = {2012.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012ppd.pdf},
}
F. Alt, A. Sahami Shirazi, A. Schmidt, and R. Atterer, “Bridging waiting times on web pages,” in Proceedings of the 14th International Conference on Human-Computer Interaction with Mobile Devices and Services, New York, NY, USA, 2012, pp. 305–308. doi:10.1145/2371574.2371619
[BibTeX] [Abstract] [Download PDF]
High-speed Internet connectivity makes browsing a convenient task. However, there are many situations in which surfing the web is still slow due to limited bandwidth, slow servers, or complex queries. As a result, loading web pages can take several seconds, making (mobile) browsing cumbersome. We present an approach which makes use of the time spent on waiting for the next page, by bridging the wait with extra cached or preloaded content. We show how the content (e.g., news, Twitter) can be adapted to the user’s interests and to the context of use, hence making mobile surfing more comfortable. We compare two approaches: in time-multiplex mode, the entire screen displays bridging content until the loading is finished. In space-multiplex mode, content is displayed alongside the requested content while it loads. We use an HTTP proxy to intercept requests and add JavaScript code, which allows the bridging content from websites of our choice to be inserted. The approach was evaluated with 15 participants, assessing suitable content and usability.
@InProceedings{alt2012mobilehci,
author = {Alt, Florian and Sahami Shirazi, Alireza and Schmidt, Albrecht and Atterer, Richard},
title = {Bridging Waiting Times on Web Pages},
booktitle = {Proceedings of the 14th International Conference on Human-computer Interaction with Mobile Devices and Services},
year = {2012},
series = {MobileHCI '12},
pages = {305--308},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2012mobilehci},
abstract = {High-speed Internet connectivity makes browsing a convenient task. However, there are many situations in which surfing the web is still slow due to limited bandwidth, slow servers, or complex queries. As a result, loading web pages can take several seconds, making (mobile) browsing cumbersome. We present an approach which makes use of the time spent on waiting for the next page, by bridging the wait with extra cached or preloaded content. We show how the content (e.g., news, Twitter) can be adapted to the user's interests and to the context of use, hence making mobile surfing more comfortable. We compare two approaches: in time-multiplex mode, the entire screen displays bridging content until the loading is finished. In space-multiplex mode, content is displayed alongside the requested content while it loads. We use an HTTP proxy to intercept requests and add JavaScript code, which allows the bridging content from websites of our choice to be inserted. The approach was evaluated with 15 participants, assessing suitable content and usability.},
acmid = {2371619},
doi = {10.1145/2371574.2371619},
isbn = {978-1-4503-1105-2},
keywords = {mobile device, waiting time, www},
location = {San Francisco, California, USA},
numpages = {4},
timestamp = {2012.10.18},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012mobilehci.pdf},
}
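A minimal sketch of the injection mechanism, assuming a browser configured to use a local HTTP proxy. This is not the authors' implementation; the injected snippet and the bridging content are simplified placeholders built on Python's standard library:

import urllib.request
from http.server import BaseHTTPRequestHandler, HTTPServer

# Placeholder bridging content; the paper's system selected cached or
# preloaded items (e.g., news, Twitter) matched to the user's interests.
BRIDGE_JS = (b"<script>window.addEventListener('beforeunload', function () {"
             b" document.body.innerHTML = '<h1>While you wait: a news item</h1>';"
             b"});</script>")

class InjectingProxy(BaseHTTPRequestHandler):
    def do_GET(self):
        # Proxied requests carry an absolute URL, e.g. GET http://example.com/
        with urllib.request.urlopen(self.path) as upstream:
            body = upstream.read()
        # Inject the bridging script into HTML responses only.
        if b"</body>" in body:
            body = body.replace(b"</body>", BRIDGE_JS + b"</body>", 1)
        self.send_response(200)
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()
        self.wfile.write(body)

if __name__ == "__main__":
    HTTPServer(("127.0.0.1", 8080), InjectingProxy).serve_forever()

Pointing a browser's HTTP proxy setting at 127.0.0.1:8080 routes plain-HTTP pages through the rewriter; the snippet corresponds to the time-multiplex mode, where bridging content takes over the screen while the next page loads.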
F. Alt, A. S. Shirazi, A. Schmidt, and J. Mennenöh, “Increasing the user’s attention on the web: using implicit interaction based on gaze behavior to tailor content,” in Proceedings of the 7th Nordic Conference on Human-Computer Interaction: Making Sense Through Design, New York, NY, USA, 2012, pp. 544–553. doi:10.1145/2399016.2399099
[BibTeX] [Abstract] [Download PDF]
The World Wide Web has evolved into a widely used interactive application platform, providing information, products, and services. With eye trackers we envision that gaze information as an additional input channel can be used in the future to adapt and tailor web content (e.g., news, information, ads) towards the users’ attention as they implicitly interact with web pages. We present a novel approach, which allows web content to be customized on-the-fly based on the user’s gaze behavior (dwell time, duration of fixations, and number of fixations). Our system analyzes the gaze path on a page and uses this information to create adaptive content on subsequent pages. As a proof-of-concept we report on a case study with 12 participants. We presented them both randomly chosen content (baseline) as well as content chosen based on their gaze-behavior. We found a significant increase of attention towards the adapted content and evidence for changes in the user attitude based on the Elaboration Likelihood Model.
@InProceedings{alt2012nordichi,
author = {Alt, Florian and Shirazi, Alireza Sahami and Schmidt, Albrecht and Mennen\"{o}h, Julian},
title = {Increasing the User's Attention on the Web: Using Implicit Interaction Based on Gaze Behavior to Tailor Content},
booktitle = {Proceedings of the 7th Nordic Conference on Human-Computer Interaction: Making Sense Through Design},
year = {2012},
series = {NordiCHI '12},
pages = {544--553},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2012nordichi},
abstract = {The World Wide Web has evolved into a widely used interactive application platform, providing information, products, and services. With eye trackers we envision that gaze information as an additional input channel can be used in the future to adapt and tailor web content (e.g., news, information, ads) towards the users' attention as they implicitly interact with web pages. We present a novel approach, which allows web content to be customized on-the-fly based on the user's gaze behavior (dwell time, duration of fixations, and number of fixations). Our system analyzes the gaze path on a page and uses this information to create adaptive content on subsequent pages. As a proof-of-concept we report on a case study with 12 participants. We presented them both randomly chosen content (baseline) as well as content chosen based on their gaze-behavior. We found a significant increase of attention towards the adapted content and evidence for changes in the user attitude based on the Elaboration Likelihood Model.},
acmid = {2399099},
doi = {10.1145/2399016.2399099},
isbn = {978-1-4503-1482-4},
keywords = {adaptative content, eye tracking, implicit interaction},
location = {Copenhagen, Denmark},
numpages = {10},
timestamp = {2012.10.19},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2012nordichi.pdf},
}
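The three gaze measures named in the abstract can be combined into a per-element interest score used to rank candidate content for the next page. A sketch with illustrative weights (the paper's actual combination of the measures is not reproduced here):

from dataclasses import dataclass

@dataclass
class GazeStats:
    dwell_time_s: float     # total time gaze rested on the element
    mean_fixation_s: float  # average fixation duration on the element
    fixation_count: int     # number of fixations on the element

def interest_score(g: GazeStats) -> float:
    # Illustrative linear weighting of the three measures; real weights
    # would be tuned against observed attention data.
    return 0.5 * g.dwell_time_s + 3.0 * g.mean_fixation_s + 0.2 * g.fixation_count

gaze_per_element = {
    "sports_news": GazeStats(4.2, 0.35, 9),
    "tech_ad": GazeStats(1.1, 0.20, 3),
}
# Choose the topic that attracted the most attention to tailor the next page.
best = max(gaze_per_element, key=lambda k: interest_score(gaze_per_element[k]))
print(best)  # -> sports_news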
A. Bulling, F. Alt, and A. Schmidt, “Increasing The Security Of Gaze-Based Cued-Recall Graphical Passwords Using Saliency Masks,” in Proceedings of the 2012 ACM Annual Conference on Human Factors in Computing Systems, New York, NY, USA, 2012, pp. 3011–3020. doi:10.1145/2207676.2208712
[BibTeX] [Abstract] [Download PDF]
With computers being used ever more ubiquitously in situations where privacy is important, secure user authentication is a central requirement. Gaze-based graphical passwords are a particularly promising means for shoulder-surfing-resistant authentication, but selecting secure passwords remains challenging. In this paper, we present a novel gaze-based authentication scheme that makes use of cued-recall graphical passwords on a single image. In order to increase password security, our approach uses a computational model of visual attention to mask those areas of the image that are most likely to attract visual attention. We create a realistic threat model for attacks that may occur in public settings, such as filming the user’s interaction while drawing money from an ATM. Based on a 12-participant user study, we show that our approach is significantly more secure than a standard image-based authentication and gaze-based 4-digit PIN entry.
@InProceedings{bulling2012chi,
author = {Bulling, Andreas and Alt, Florian and Schmidt, Albrecht},
title = {{Increasing The Security Of Gaze-Based Cued-Recall Graphical Passwords Using Saliency Masks}},
booktitle = {Proceedings of the 2012 ACM Annual Conference on Human Factors in Computing Systems},
year = {2012},
series = {CHI'12},
pages = {3011--3020},
address = {New York, NY, USA},
month = {apr},
publisher = {ACM},
note = {bulling2012chi},
abstract = {With computers being used ever more ubiquitously in situations where privacy is important, secure user authentication is a central requirement. Gaze-based graphical passwords are a particularly promising means for shoulder-surfing-resistant authentication, but selecting secure passwords remains challenging. In this paper, we present a novel gaze-based authentication scheme that makes use of cued-recall graphical passwords on a single image. In order to increase password security, our approach uses a computational model of visual attention to mask those areas of the image that are most likely to attract visual attention. We create a realistic threat model for attacks that may occur in public settings, such as filming the user's interaction while drawing money from an ATM. Based on a 12-participant user study, we show that our approach is significantly more secure than a standard image-based authentication and gaze-based 4-digit PIN entry.},
acmid = {2208712},
doi = {10.1145/2207676.2208712},
isbn = {978-1-4503-1015-4},
keywords = {cued-recall graphical passwords, eye tracking, gaze-based, saliency masks, User authentication},
location = {Austin, Texas, USA},
numpages = {10},
timestamp = {2012.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/bulling2012chi.pdf},
}
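The masking step can be pictured as thresholding a saliency map and blanking the most conspicuous regions, so that chosen password points must lie in less predictable areas. A toy NumPy sketch (the saliency map is taken as given here; the paper computes it with a computational model of visual attention):

import numpy as np

def apply_saliency_mask(image: np.ndarray, saliency: np.ndarray,
                        percentile: float = 80.0) -> np.ndarray:
    """Gray out the most salient regions so users cannot pick
    attention-grabbing (and thus more guessable) password points."""
    threshold = np.percentile(saliency, percentile)
    masked = image.copy()
    masked[saliency >= threshold] = 128  # neutral gray over salient areas
    return masked

rng = np.random.default_rng(0)
img = rng.integers(0, 256, size=(64, 64), dtype=np.uint8)  # stand-in image
sal = rng.random((64, 64))                                 # stand-in saliency map
print(apply_saliency_mask(img, sal).mean())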
J. Müller, R. Walter, G. Bailly, M. Nischt, and F. Alt, “Looking Glass: A Field Study on Noticing Interactivity of a Shop Window,” in Proceedings of the 2012 ACM Conference on Human Factors in Computing Systems, New York, NY, USA, 2012, p. 297–306. doi:10.1145/2207676.2207718
[BibTeX] [Abstract] [Download PDF]
In this paper we present our findings from a lab and a field study investigating how passers-by notice the interactivity of public displays. We designed an interactive installation that uses visual feedback to the incidental movements of passers-by to communicate its interactivity. The lab study reveals: (1) Mirrored user silhouettes and images are more effective than avatar-like representations. (2) It takes time to notice the interactivity (approx. 1.2s). In the field study, three displays were installed during three weeks in shop windows, and data about 502 interaction sessions were collected. Our observations show: (1) Significantly more passers-by interact when immediately showing the mirrored user image (+90%) or silhouette (+47%) compared to a traditional attract sequence with call-to-action. (2) Passers-by often notice interactivity late and have to walk back to interact (the landing effect). (3) If somebody is already interacting, others begin interaction behind the ones already interacting, forming multiple rows (the honeypot effect). Our findings can be used to design public display applications and shop windows that more effectively communicate interactivity to passers-by.
@InProceedings{mueller2012chi,
author = {M\"{u}ller, J\"{o}rg and Walter, Robert and Bailly, Gilles and Nischt, Michael and Alt, Florian},
title = {{Looking Glass: A Field Study on Noticing Interactivity of a Shop Window}},
booktitle = {{Proceedings of the 2012 ACM Conference on Human Factors in Computing Systems}},
year = {2012},
series = {CHI'12},
pages = {297--306},
address = {New York, NY, USA},
month = {apr},
publisher = {ACM},
note = {mueller2012chi},
abstract = {In this paper we present our findings from a lab and a field study investigating how passers-by notice the interactivity of public displays. We designed an interactive installation that uses visual feedback to the incidental movements of passers-by to communicate its interactivity. The lab study reveals: (1) Mirrored user silhouettes and images are more effective than avatar-like representations. (2) It takes time to notice the interactivity (approx. 1.2s). In the field study, three displays were installed during three weeks in shop windows, and data about 502 interaction sessions were collected. Our observations show: (1) Significantly more passers-by interact when immediately showing the mirrored user image (+90%) or silhouette (+47%) compared to a traditional attract sequence with call-to-action. (2) Passers-by often notice interactivity late and have to walk back to interact (the landing effect). (3) If somebody is already interacting, others begin interaction behind the ones already interacting, forming multiple rows (the honeypot effect). Our findings can be used to design public display applications and shop windows that more effectively communicate interactivity to passers-by.},
acmid = {2207718},
doi = {10.1145/2207676.2207718},
isbn = {978-1-4503-1015-4},
keywords = {interactivity, noticing interactivity, public displays, User representation},
location = {Austin, Texas, USA},
numpages = {10},
owner = {flo},
timestamp = {2012.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mueller2012chi.pdf},
}
J. Müller, R. Walter, G. Bailly, M. Nischt, and F. Alt, “Looking Glass: A Field Study on Noticing Interactivity of a Shop Window (Video),” in Adjunct Proceedings of the 2012 ACM Conference on Human Factors in Computing Systems, New York, NY, USA, 2012, pp. 297–306. doi:10.1145/2207676.2207718
[BibTeX] [Download PDF]
@InProceedings{mueller2012chivideo,
author = {M\"{u}ller, J\"{o}rg and Walter, Robert and Bailly, Gilles and Nischt, Michael and Alt, Florian},
title = {{Looking Glass: A Field Study on Noticing Interactivity of a Shop Window (Video)}},
booktitle = {Adjunct Proceedings of the 2012 ACM Conference on Human Factors in Computing Systems},
year = {2012},
series = {CHI'12},
pages = {297--306},
address = {New York, NY, USA},
month = {apr},
publisher = {ACM},
acmid = {2207718},
doi = {10.1145/2207676.2207718},
isbn = {978-1-4503-1015-4},
keywords = {interactivity, noticing interactivity, public displays, User representation},
location = {Austin, Texas, USA},
numpages = {10},
timestamp = {2012.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mueller2012chi.pdf},
}
N. Memarovic, M. Langheinrich, and F. Alt, “Interacting places — a framework for promoting community interaction and place awareness through public displays,” in 2012 IEEE International Conference on Pervasive Computing and Communications Workshops, 2012, pp. 327–430. doi:10.1109/PerComW.2012.6197526
[BibTeX] [Abstract] [Download PDF]
The proliferation of public displays, along with ubiquitous wireless communication and sensing technology, has made it possible to create a novel public communication medium: open networked pervasive displays would allow citizens to provide their own content, appropriate close-by displays, and increase their own awareness of a display’s surroundings and its local communities. We envision that such displays ultimately can create interacting places, i.e., public spaces that promote community interaction and place awareness. In this paper we describe our Interacting Places Framework (IPF), which helps to identify challenges and opportunities in this novel research space. Our IPF has 4 elements: 1) content providers, i.e., entities that supply content; 2) content viewers, i.e., people who consume the content; 3) a number of interacting places communication channels that support inclusive, i.e., open-for-everyone, and exclusive, i.e., closed-group communication; and 4) an awareness diffusion layer that promotes community interaction either explicitly, i.e., through content tailored towards a specific audience, or implicitly, by observing output for other people. We have begun initial deployments examining this space and will use the framework presented here to analyze future results.
@InProceedings{memarovic2012percomadj,
author = {N. Memarovic and M. Langheinrich and F. Alt},
title = {Interacting places — A framework for promoting community interaction and place awareness through public displays},
booktitle = {2012 IEEE International Conference on Pervasive Computing and Communications Workshops},
year = {2012},
pages = {327-430},
month = {March},
note = {memarovic2012percomadj},
abstract = {The proliferation of public displays, along with ubiquitous wireless communication and sensing technology, has made it possible to create a novel public communication medium: open networked pervasive displays would allow citizens to provide their own content, appropriate close-by displays, and increase their own awareness of a display’s surroundings and its local communities. We envision that such displays ultimately can create interacting places, i.e., public spaces that promote community interaction and place awareness. In this paper we describe our Interacting Places Framework (IPF), which helps to identify challenges and opportunities in this novel research space. Our IPF has 4 elements: 1) content providers, i.e., entities that supply content; 2) content viewers, i.e., people who consume the content; 3) a number of interacting places communication channels that support inclusive, i.e., open-for-everyone, and exclusive, i.e., closed-group communication; and 4) an awareness diffusion layer that promotes community interaction either explicitly, i.e., through content tailored towards a specific audience, or implicitly, by observing output for other people. We have begun initial deployments examining this space and will use the framework presented here to analyze future results.},
doi = {10.1109/PerComW.2012.6197526},
keywords = {liquid crystal displays;mobile computing;public utilities;social sciences;wireless sensor networks;public displays;ubiquitous wireless communication;public communication medium;open networked pervasive displays;close-by displays;local communities;public spaces;community interaction;place awareness;content providers;content viewers;interacting place communication channel;open-for-everyone communication channel;exclusive communication channel;inclusive communication channel;closed-group communication channel;awareness diffusion layer;wireless sensing technology;Communities;Communication channels;Mobile handsets;Presses;Instruments;Educational institutions;Cities and towns;community interaction;interacting places;public displays;urban computing;urban informatics},
timestamp = {2012.04.17},
url = {http://www.florian-alt.org/unibw/wp-content/publications/memarovic2012percomadj.pdf},
}
N. Memarovic, M. Langheinrich, and F. Alt, “The Interacting Places Framework: Conceptualizing Public Display Applications that Promote Community Interaction and Place Awareness,” in Proceedings of the 2012 International Symposium on Pervasive Displays, New York, NY, USA, 2012, pp. 71–76. doi:10.1145/2307798.2307805
[BibTeX] [Abstract] [Download PDF]
The proliferation of public displays, along with ubiquitous wireless communication and sensing technology, has made it possible to create a novel public communication medium: open networked pervasive displays would allow citizens to provide their own content, appropriate close-by displays, and increase their own awareness of a display’s surroundings and its local communities. We envision that such displays can create interacting places, i. e., public spaces that promote community interaction and place awareness. In this paper we describe our Interacting Places Framework (IPF), a conceptual framework for designing applications in this novel research space that we developed based on four distinct public display studies. Our IPF focuses on 4 elements: 1) content providers, i. e., entities that will supply content; 2) content viewers, i. e., people who are addressed by the content; 3) communication channels that deliver the content and range from inclusive, i. e., open-for-everyone, to exclusive, i. e., closed-group channels; and 4) an awareness diffusion layer that describes how community awareness building happens both explicitly, i. e., through content tailored towards a specific audience, and implicitly, by observing output for other people.
@InProceedings{memarovic2012perdis,
author = {Memarovic, Nemanja and Langheinrich, Marc and Alt, Florian},
title = {{The Interacting Places Framework: Conceptualizing Public Display Applications that Promote Community Interaction and Place Awareness}},
booktitle = {Proceedings of the 2012 International Symposium on Pervasive Displays},
year = {2012},
series = {PerDis'12},
pages = {71--76},
address = {New York, NY, USA},
month = {jun},
publisher = {ACM},
note = {memarovic2012perdis},
abstract = {The proliferation of public displays, along with ubiquitous wireless communication and sensing technology, has made it possible to create a novel public communication medium: open networked pervasive displays would allow citizens to provide their own content, appropriate close-by displays, and increase their own awareness of a display's surroundings and its local communities. We envision that such displays can create interacting places, i. e., public spaces that promote community interaction and place awareness. In this paper we describe our Interacting Places Framework (IPF), a conceptual framework for designing applications in this novel research space that we developed based on four distinct public display studies. Our IPF focuses on 4 elements: 1) content providers, i. e., entities that will supply content; 2) content viewers, i. e., people who are addressed by the content; 3) communication channels that deliver the content and range from inclusive, i. e., open-for-everyone, to exclusive, i. e., closed-group channels; and 4) an awareness diffusion layer that describes how community awareness building happens both explicitly, i. e., through content tailored towards a specific audience, and implicitly, by observing output for other people.},
acmid = {2307805},
articleno = {7},
doi = {10.1145/2307798.2307805},
isbn = {978-1-4503-1414-5},
keywords = {community interaction, interacting places, public displays, urban Computing, urban informatics},
location = {Porto, Portugal},
numpages = {6},
timestamp = {2012.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/memarovic2012perdis.pdf},
}
N. Memarovic, M. Langheinrich, F. Alt, I. Elhart, S. Hosio, and E. Rubegni, “Using public displays to stimulate passive engagement, active engagement, and discovery in public spaces,” in Proceedings of the 4th Media Architecture Biennale Conference: Participation, New York, NY, USA, 2012, pp. 55–64. doi:10.1145/2421076.2421086
[BibTeX] [Abstract] [Download PDF]
In their influential book “Public space” Carr et al. describe essential human needs that public spaces fulfill: (1) passive engagement with the environment, where we observe what others are doing; (2) active engagement through intellectual challenges posed by the space, or through engagement with the people in it; and (3) excitement of novel discoveries within the space. An often underused resource in public spaces – public displays – can be used to stimulate these needs. In this paper we argue for a new research direction that explores how public displays can stimulate such essential needs in public spaces. We describe and conceptualize related processes that occur around public displays, based on in-depth observations of people interacting with a publicly fielded display application in a city center. Our conceptualization is meant to lay the foundations for designing engaging public display systems that stimulate PACD, and for supporting the analysis of existing deployments.
@InProceedings{memarovic2012mab,
author = {Memarovic, Nemanja and Langheinrich, Marc and Alt, Florian and Elhart, Ivan and Hosio, Simo and Rubegni, Elisa},
title = {Using Public Displays to Stimulate Passive Engagement, Active Engagement, and Discovery in Public Spaces},
booktitle = {Proceedings of the 4th Media Architecture Biennale Conference: Participation},
year = {2012},
series = {MAB '12},
pages = {55--64},
address = {New York, NY, USA},
publisher = {ACM},
note = {memarovic2012mab},
abstract = {In their influential book "Public space" Carr et al. describe essential human needs that public spaces fulfill: (1) passive engagement with the environment, where we observe what others are doing; (2) active engagement through intellectual challenges posed by the space, or through engagement with the people in it; and (3) excitement of novel discoveries within the space. An often underused resource in public spaces -- public displays -- can be used to stimulate these needs. In this paper we argue for a new research direction that explores how public displays can stimulate such essential needs in public spaces. We describe and conceptualize related processes that occur around public displays, based on in-depth observations of people interacting with a publicly fielded display application in a city center. Our conceptualization is meant to lay the foundations for designing engaging public display systems that stimulate PACD, and for supporting the analysis of existing deployments.},
acmid = {2421086},
doi = {10.1145/2421076.2421086},
isbn = {978-1-4503-1792-4},
keywords = {community interaction, identity cognition, public displays, public space, urban computing, urban informatics},
location = {Aarhus, Denmark},
numpages = {10},
timestamp = {2012.11.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/memarovic2012mab.pdf},
}
B. Pfleging, F. Alt, and A. Schmidt, “Meaningful melodies – personal sonification of text messages for mobile devices,” in Adjunct proceedings of the 14th ACM SIGCHI’s international conference on human-computer interaction with mobile devices and services, San Francisco, CA, US, 2012.
[BibTeX] [Abstract] [Download PDF]
Mobile phones offer great potential for personalization. Besides apps and background images, ringtones are the major form of personalization. They are most often used to have a personal sound for incoming texts and calls. Furthermore, ringtones are used to identify the caller or sender of a message. In parts, this function is utilitarian (e.g., caller identification without looking at the phone) but it is also a form of self-expression (e.g., favorite tune as standard ringtone). We investigate how audio can be used to convey richer information. In this demo we show how sonifications of SMS can be used to encode information about the sender’s identity as well as the content and intention of a message based on flexible, user-generated mappings. We present a platform that allows arbitrary mappings to be managed and apps to be connected in order to create a sonification of any message. Using a background app on Android, we show the utility of the approach for mobile devices.
@InProceedings{pfleging2012mobilehciadj,
author = {Bastian Pfleging and Florian Alt and Albrecht Schmidt},
title = {Meaningful Melodies - Personal Sonification of Text Messages for Mobile Devices},
booktitle = {Adjunct Proceedings of the 14th ACM SIGCHI's International Conference on Human-Computer Interaction with Mobile Devices and Services},
year = {2012},
series = {MobileHCI'12},
address = {San Francisco, CA, US},
month = {sep},
note = {pfleging2012mobilehciadj},
abstract = {Mobile phones offer great potential for personalization. Besides apps and background images, ringtones are the major form of personalization. They are most often used to have a personal sound for incoming texts and calls. Furthermore, ringtones are used to identify the caller or sender of a message. In parts, this function is utilitarian (e.g., caller identification without looking at the phone) but it is also a form of self-expression (e.g., favorite tune as standard ringtone). We investigate how audio can be used to convey richer information. In this demo we show how sonifications of SMS can be used to encode information about the sender's identity as well as the content and intention of a message based on flexible, user-generated mappings. We present a platform that allows arbitrary mappings to be managed and apps to be connected in order to create a sonification of any message. Using a background app on Android, we show the utility of the approach for mobile devices.},
owner = {flo},
timestamp = {2012.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/pfleging2012mobilehciadj.pdf},
}
A. Schmidt, B. Pfleging, F. Alt, A. Sahami, and G. Fitzpatrick, “Interacting with 21st-century computers,” IEEE Pervasive Computing, vol. 11, iss. 1, pp. 22-31, 2012. doi:10.1109/MPRV.2011.81
[BibTeX] [Abstract] [Download PDF]
This paper reflects on four themes from Weiser’s original vision from a human-computer interaction perspective: computing everywhere, personal computing, the social dimension of computing, and privacy implications. The authors review developments both in accordance with and contrasting this vision.
@Article{schmidt2012pervasivecomputing,
author = {A. Schmidt and B. Pfleging and F. Alt and A. Sahami and G. Fitzpatrick},
title = {Interacting with 21st-Century Computers},
journal = {IEEE Pervasive Computing},
year = {2012},
volume = {11},
number = {1},
pages = {22-31},
month = {January},
issn = {1536-1268},
note = {schmidt2012pervasivecomputing},
abstract = {This paper reflects on four themes from Weiser's original vision from a human-computer interaction perspective: computing everywhere, personal computing, the social dimension of computing, and privacy implications. The authors review developments both in accordance with and contrasting this vision.},
doi = {10.1109/MPRV.2011.81},
keywords = {data privacy;human computer interaction;social aspects of automation;human-computer interaction perspective;Weiser perspective;computing everywhere perspective;personal computing perspective;social dimension perspective;privacy implication;Pervasive computing;User/Machine Systems;User Interfaces;Multimedia Information Systems;Evolutionary prototyping;Human Factors in Software Design;User interfaces.},
timestamp = {2012.04.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schmidt2012pervasivecomputing.pdf},
}
S. Schneegaß, F. Alt, and A. Schmidt, “Mobile interaction with ads on public display networks,” in Proceedings of the 10th international conference on mobile systems, applications, and services, 2012, p. 479–480.
[BibTeX] [Abstract] [Download PDF]
In public places we can observe that many conventional displays are replaced by digital displays, a lot of them networked. These displays mainly show advertising in a similar way to television’s commercial break, not exploiting the opportunities of the new medium [1]. Several approaches of interaction between mobile devices and public displays have been investigated over the last 15 years. In this demo we concentrate on challenges that are specific to public displays used for advertising. In particular we focus on how new approaches for interaction with content, means for content creation, and tools for follow-ups can be implemented based on mobile devices. With Digifieds we present a research system that has been used to explore different research questions and to showcase the potential of interactive advertising in public space.
@InProceedings{schneegass2012mobisysadj,
author = {Schneega{\ss}, S. and Alt, F. and Schmidt, A.},
title = {Mobile Interaction with Ads on Public Display Networks},
booktitle = {Proceedings of the 10th International Conference on Mobile Systems, Applications, and Services},
year = {2012},
series = {MobiSys'12},
pages = {479--480},
organization = {ACM},
note = {schneegass2012mobisysadj},
abstract = {In public places we can observe that many conventional displays are replaced by digital displays, a lot of them networked. These displays mainly show advertising in a similar way to television's commercial break, not exploiting the opportunities of the new medium [1]. Several approaches of interaction between mobile devices and public displays have been investigated over the last 15 years. In this demo we concentrate on challenges that are specific to public displays used for advertising. In particular we focus on how new approaches for interaction with content, means for content creation, and tools for follow-ups can be implemented based on mobile devices. With Digifieds we present a research system that has been used to explore different research questions and to showcase the potential of interactive advertising in public space.},
timestamp = {2012.06.25},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schneegass2012mobisysadj.pdf},
}

2011

F. Alt, D. Bial, T. Kubitza, A. S. Shirazi, M. Ortel, B. Zurmaar, F. Zaidan, T. Lewen, and A. Schmidt, “Digifieds: Evaluating Suitable Interaction Techniques for Shared Public Notice Areas,” in Adjunct proceedings of the ninth international conference on pervasive computing, San Francisco, CA, USA, 2011.
[BibTeX] [Abstract] [Download PDF]
Public notice areas are nowadays being widely used in stores, restaurants, cafes and public institutions by customers and visitors to sell or advertise products and upcoming events. Although web platforms such as Craigslist or eBay offer similar services, traditional notice areas are highly popular as using pen and paper poses only a minimal barrier to share content. With public displays proliferating the public space and with means to network these displays, novel opportunities arise as to how information can be managed and shared. In an initial step we systematically assessed factors inhibiting or promoting the shared use of public display space and derived design implications for providing a digital version of such public notice areas [2]. In this poster we report on the implementation of such a digital shared notice area, called Digifieds. With an initial lab study we aimed at understanding suitable means of interaction when it comes to creating, posting, and taking away content.
@InProceedings{alt2011pervasiveadj,
author = {Florian Alt AND Dominik Bial AND Thomas Kubitza AND Alireza Sahami Shirazi AND Markus Ortel AND Bjoern Zurmaar AND Firas Zaidan AND Tim Lewen AND Albrecht Schmidt},
title = {{Digifieds: Evaluating Suitable Interaction Techniques for Shared Public Notice Areas}},
booktitle = {Adjunct Proceedings of the Ninth International Conference on Pervasive Computing},
year = {2011},
series = {Pervasive'11},
address = {San Francisco, CA, USA},
month = {jun},
note = {alt2011pervasiveadj},
abstract = {Public notice areas are nowadays being widely used in stores, restaurants, cafes and public institutions by customers and visitors to sell or advertise products and upcoming events. Although web platforms such as Craigslist or eBay offer similar services, traditional notice areas are highly popular as using pen and paper poses only a minimal barrier to share content. With public displays proliferating the public space and with means to network these displays, novel opportunities arise as to how information can be managed and shared. In an initial step we systematically assessed factors inhibiting or promoting the shared use of public display space and derived design implications for providing a digital version of such public notice areas [2]. In this poster we report on the implementation of such a digital shared notice area, called Digifieds. With an initial lab study we aimed at understanding suitable means of interaction when it comes to creating, posting, and taking away content.},
owner = {flo},
timestamp = {2011.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2011pervasiveadj.pdf},
}
F. Alt, A. Bungert, B. Pfleging, A. Schmidt, and M. Havemann, “Supporting Children With Special Needs Through Multi-Perspective Behavior Analysis,” in Proceedings of the tenth international conference on mobile and ubiquitous multimedia, New York, NY, USA, 2011, p. 81–84. doi:10.1145/2107596.2107605
[BibTeX] [Abstract] [Download PDF]
In past years, ubiquitous computing technologies have been successfully deployed for supporting children with special needs. One focus of current research has been on post-hoc behavior analysis based on video footage where one or multiple cameras were used to review situations in which children behaved in a certain way. As miniaturized cameras as well as portable devices are becoming available at low costs, we envision a new quality in supporting the diagnosis, observation, and education of children with special needs. In contrast to existing approaches that use cameras in fixed locations, we suggest using multiple mobile camera perspectives. In this way observation data from fellow classmates, teachers, and caregivers can be considered, even in highly dynamic outdoor situations. In this paper we present MuPerBeAn, a platform that allows multi-perspective video footage from mobile cameras to be collected, synchronously reviewed, and annotated. We report on interviews with caregivers and parents and present a qualitative study based on two scenarios involving a total of seven children with autism (CWA). Our findings show that observing multiple mobile perspectives can help children as well as teachers to better reflect on situations, particularly during education.
@InProceedings{alt2011mum2,
author = {Alt, Florian and Bungert, Andreas and Pfleging, Bastian and Schmidt, Albrecht and Havemann, Meindert},
title = {{Supporting Children With Special Needs Through Multi-Perspective Behavior Analysis}},
booktitle = {Proceedings of the Tenth International Conference on Mobile and Ubiquitous Multimedia},
year = {2011},
series = {MUM'11},
pages = {81--84},
address = {New York, NY, USA},
month = {dec},
publisher = {ACM},
note = {alt2011mum2},
abstract = {In past years, ubiquitous computing technologies have been successfully deployed for supporting children with special needs. One focus of current research has been on post-hoc behavior analysis based on video footage where one or multiple cameras were used to review situations in which children behaved in a certain way. As miniaturized cameras as well as portable devices are becoming available at low costs, we envision a new quality in supporting the diagnosis, observation, and education of children with special needs. In contrast to existing approaches that use cameras in fixed locations, we suggest using multiple mobile camera perspectives. In this way observation data from fellow classmates, teachers, and caregivers can be considered, even in highly dynamic outdoor situations. In this paper we present MuPerBeAn, a platform that allows multi-perspective video footage from mobile cameras to be collected, synchronously reviewed, and annotated. We report on interviews with caregivers and parents and present a qualitative study based on two scenarios involving a total of seven children with autism (CWA). Our findings show that observing multiple mobile perspectives can help children as well as teachers to better reflect on situations, particularly during education.},
acmid = {2107605},
doi = {10.1145/2107596.2107605},
isbn = {978-1-4503-1096-3},
keywords = {autism, cameras, mobile devices, ubiquitous computing},
location = {Beijing, China},
numpages = {4},
timestamp = {2011.12.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2011mum2.pdf},
}
F. Alt, T. Kubitza, D. Bial, F. Zaidan, M. Ortel, B. Zurmaar, T. Lewen, A. S. Shirazi, and A. Schmidt, “Digifieds: insights into deploying digital public notice areas in the wild,” in Proceedings of the 10th international conference on mobile and ubiquitous multimedia, New York, NY, USA, 2011, p. 165–174. doi:10.1145/2107596.2107618
[BibTeX] [Abstract] [Download PDF]
Traditional public notice areas (PNAs) are nowadays a popular means to publicly exchange information and reach people of a local community. The high usability led to a widespread use in stores, cafes, supermarkets, and public institutions. With public displays permeating public spaces and with display providers and owners being willing to share parts of their display space we envision traditional PNAs to be complemented or even replaced by their digital counterparts in the future, hence contributing to making public displays a novel communication medium. In this paper we report on the design and development of Digifieds (derived from digital classified), a digital public notice area. We deployed and evaluated Digifieds in an urban environment in the context of the UbiChallenge 2011 in Oulu, Finland over the course of 6 months. The deployment allowed the users’ view to be studied with regard to the envisioned content, preferred interaction techniques, as well as privacy concerns, and to compare them against traditional PNAs.
@InProceedings{alt2011mum1,
author = {Alt, Florian and Kubitza, Thomas and Bial, Dominik and Zaidan, Firas and Ortel, Markus and Zurmaar, Bj\"{o}rn and Lewen, Tim and Shirazi, Alireza Sahami and Schmidt, Albrecht},
title = {Digifieds: Insights into Deploying Digital Public Notice Areas in the Wild},
booktitle = {Proceedings of the 10th International Conference on Mobile and Ubiquitous Multimedia},
year = {2011},
series = {MUM '11},
pages = {165--174},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2011mum1},
abstract = {Traditional public notice areas (PNAs) are nowadays a popular means to publicly exchange information and reach people of a local community. The high usability led to a widespread use in stores, cafes, supermarkets, and public institutions. With public displays permeating public spaces and with display providers and owners being willing to share parts of their display space we envision traditional PNAs to be complemented or even replaced by their digital counterparts in the future, hence contributing to making public displays a novel communication medium. In this paper we report on the design and development of Digifieds (derived from digital classified), a digital public notice area. We deployed and evaluated Digifieds in an urban environment in the context of the UbiChallenge 2011 in Oulu, Finland over the course of 6 months. The deployment allowed the users' view to be studied with regard to the envisioned content, preferred interaction techniques, as well as privacy concerns, and to compare them against traditional PNAs.},
acmid = {2107618},
doi = {10.1145/2107596.2107618},
isbn = {978-1-4503-1096-3},
keywords = {classifieds, digifieds, interaction, public displays, urban computing},
location = {Beijing, China},
numpages = {10},
timestamp = {2011.12.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2011mum1.pdf},
}
F. Alt, N. Memarovic, I. Elhart, D. Bial, A. Schmidt, M. Langheinrich, G. Harboe, E. Huang, and M. P. Scipioni, “Designing Shared Public Display Networks: Implications from Today’s Paper-based Notice Areas,” in Proceedings of the ninth international conference on pervasive computing, Berlin, Heidelberg, 2011, p. 258–275.
[BibTeX] [Abstract] [Download PDF]
Large public displays have become a regular conceptual element in many shops and businesses, where they advertise products or highlight upcoming events. In our work, we are interested in exploring how these isolated display solutions can be interconnected to form a single large network of public displays, thus supporting novel forms of sharing access to display real estate. In order to explore the feasibility of this vision, we investigated today’s practices surrounding shared notice areas, i.e. places where customers and visitors can put up event posters and classifieds, such as shop windows or notice boards. In particular, we looked at the content posted to such areas, the means for sharing it (i.e., forms of content control), and the reason for providing the shared notice area. Based on two-week long photo logs and a number of in-depth interviews with providers of such notice areas, we provide a systematic assessment of factors that inhibit or promote the shared use of public display space, ultimately leading to a set of concrete design implications for providing future digital versions of such public notice areas in the form of networked public displays.
@InProceedings{alt2011pervasive,
author = {Alt, Florian and Memarovic, Nemanja and Elhart, Ivan and Bial, Dominik and Schmidt, Albrecht and Langheinrich, Marc and Harboe, Gunnar and Huang, Elaine and Scipioni, Marcello P.},
title = {{Designing Shared Public Display Networks: Implications from Today's Paper-based Notice Areas}},
booktitle = {Proceedings of the Ninth International Conference on Pervasive Computing},
year = {2011},
series = {Pervasive'11},
pages = {258--275},
address = {Berlin, Heidelberg},
month = {jun},
publisher = {Springer-Verlag},
note = {alt2011pervasive},
abstract = {Large public displays have become a regular conceptual element in many shops and businesses, where they advertise products or highlight upcoming events. In our work, we are interested in exploring how these isolated display solutions can be interconnected to form a single large network of public displays, thus supporting novel forms of sharing access to display real estate. In order to explore the feasibility of this vision, we investigated today’s practices surrounding shared notice areas, i.e. places where customers and visitors can put up event posters and classifieds, such as shop windows or notice boards. In particular, we looked at the content posted to such areas, the means for sharing it (i.e., forms of content control), and the reason for providing the shared notice area. Based on two-week long photo logs and a number of in-depth interviews with providers of such notice areas, we provide a systematic assessment of factors that inhibit or promote the shared use of public display space, ultimately leading to a set of concrete design implications for providing future digital versions of such public notice areas in the form of networked public displays.},
acmid = {2021999},
isbn = {978-3-642-21725-8},
keywords = {advertising, observation, public display},
location = {San Francisco, USA},
numpages = {18},
timestamp = {2011.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2011pervasive.pdf},
}
G. Beyer, F. Alt, and J. Müller, “On the Impact of Non-flat Screens on the Interaction with Public Displays,” in Proceedings of the CHI workshop on large displays in urban life, Vancouver, BC, Canada, 2011.
[BibTeX] [Abstract] [Download PDF]
With decreasing prices for display technologies and bendable displays becoming commercially available, novel forms of public displays in arbitrary shapes emerge. However, different shapes impact how users behave in the vicinity of such displays and how they interact with them. With our research we take a first step towards exploring these novel displays. We present findings from an initial study with cylindrical displays and discuss to what extent findings can be generalized towards other forms of public displays.
@InProceedings{beyer2011ldul,
author = {Gilbert Beyer AND Florian Alt AND J\"{o}rg M\"{u}ller},
title = {{On the Impact of Non-flat Screens on the Interaction with Public Displays}},
booktitle = {Proceedings of the CHI Workshop on Large Displays in Urban Life},
year = {2011},
address = {Vancouver, BC, Canada},
month = {apr},
note = {beyer2011ldul},
abstract = {With decreasing prices for display technologies and bendable displays becoming commercially available, novel forms of public displays in arbitrary shapes emerge. However, different shapes impact how users behave in the vicinity of such displays and how they interact with them. With our research we take a first step towards exploring these novel displays. We present findings from an initial study with cylindrical displays and discuss to what extent findings can be generalized towards other forms of public displays.},
owner = {flo},
timestamp = {2011.04.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/beyer2011ldul.pdf},
}
G. Beyer, F. Alt, J. Müller, A. Schmidt, K. Isakovic, S. Klose, M. Schiewe, and I. Haulsen, “Audience Behavior Around Large Interactive Cylindrical Screens,” in Proceedings of the 2011 Annual Conference on Human Factors in Computing Systems, New York, NY, USA, 2011, p. 1021–1030. doi:10.1145/1978942.1979095
[BibTeX] [Abstract] [Download PDF]
Non-planar screens, such as columns, have been a popular means for displaying information for a long time. In contrast to traditional displays their digital counterparts are mainly flat and rectangular due to current technological constraints. However, we envision bendable displays to be available in the future, which will allow for creating new forms of displays with new properties. In this paper we explore cylindrical displays as a possible form of such novel public displays. We present a prototype and report on a user study, comparing the influence of the display shape on user behavior and user experience between flat and cylindrical displays. The results indicate that people move more in the vicinity of cylindrical displays and that there is no longer a default position when it comes to interaction. As a result, such displays are especially suitable to keep people in motion and to support gesture-like interaction.
@InProceedings{beyer2011chi,
author = {Beyer, Gilbert and Alt, Florian and M\"{u}ller, J\"{o}rg and Schmidt, Albrecht and Isakovic, Karsten and Klose, Stefan and Schiewe, Manuel and Haulsen, Ivo},
title = {{Audience Behavior Around Large Interactive Cylindrical Screens}},
booktitle = {{Proceedings of the 2011 Annual Conference on Human Factors in Computing Systems}},
year = {2011},
series = {CHI'11},
pages = {1021--1030},
address = {New York, NY, USA},
month = {apr},
publisher = {ACM},
note = {beyer2011chi},
abstract = {Non-planar screens, such as columns, have been a popular means for displaying information for a long time. In contrast to traditional displays their digital counterparts are mainly flat and rectangular due to current technological constraints. However, we envision bendable displays to be available in the future, which will allow for creating new forms of displays with new properties. In this paper we explore cylindrical displays as a possible form of such novel public displays. We present a prototype and report on a user study, comparing the influence of the display shape on user behavior and user experience between flat and cylindrical displays. The results indicate that people move more in the vicinity of cylindrical displays and that there is no longer a default position when it comes to interaction. As a result, such displays are especially suitable to keep people in motion and to support gesture-like interaction.},
acmid = {1979095},
doi = {10.1145/1978942.1979095},
isbn = {978-1-4503-0228-9},
keywords = {cylindrical screens, digital columns, display formats, interactive surfaces, non-planar screens, public displays},
location = {Vancouver, BC, Canada},
numpages = {10},
timestamp = {2011.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/beyer2011chi.pdf},
}
D. Bial, D. Kern, F. Alt, and A. Schmidt, “Enhancing Outdoor Navigation Systems Through Vibrotactile Feedback,” in CHI ’11 extended abstracts on human factors in computing systems, New York, NY, USA, 2011, p. 1273–1278. doi:10.1145/1979742.1979760
[BibTeX] [Abstract] [Download PDF]
While driving, many tasks compete for the attention of the user, mainly via the audio and visual channel. When designing systems that depend upon providing feedback to users (e.g., navigation systems), it is a crucial prerequisite to minimize influence on and distraction from the driving task. This becomes even more important when designing systems for use on motorbikes: space for output devices is scarce; as people are wearing helmets, visual feedback is often difficult due to lighting conditions; and audio feedback is limited. In a first step we aimed at creating an understanding as to how information could be communicated in a meaningful way using vibrotactile signals. Therefore, we investigated suitable positions of actuators on the hand, appropriate length of the vibration stimulus, and different vibration patterns. We built a first prototype with 4 vibration actuators attached to the fingertips and asked 4 participants to test our prototype while driving. With this work we aim to lay the foundations for vibrotactile support in navigation systems.
@InProceedings{bial2011chiea,
author = {Bial, Dominik and Kern, Dagmar and Alt, Florian and Schmidt, Albrecht},
title = {{Enhancing Outdoor Navigation Systems Through Vibrotactile Feedback}},
booktitle = {CHI '11 Extended Abstracts on Human Factors in Computing Systems},
year = {2011},
series = {CHI EA'11},
pages = {1273--1278},
address = {New York, NY, USA},
month = {apr},
publisher = {ACM},
note = {bial2011chiea},
abstract = {While driving, many tasks compete for the attention of the user, mainly via the audio and visual channel. When designing systems that depend upon providing feedback to users (e.g., navigation systems), it is a crucial prerequisite to minimize influence on and distraction from the driving task. This becomes even more important when designing systems for use on motorbikes: space for output devices is scarce; as people are wearing helmets, visual feedback is often difficult due to lighting conditions; and audio feedback is limited. In a first step we aimed at creating an understanding as to how information could be communicated in a meaningful way using vibrotactile signals. Therefore, we investigated suitable positions of actuators on the hand, appropriate length of the vibration stimulus, and different vibration patterns. We built a first prototype with 4 vibration actuators attached to the fingertips and asked 4 participants to test our prototype while driving. With this work we aim to lay the foundations for vibrotactile support in navigation systems.},
acmid = {1979760},
doi = {10.1145/1979742.1979760},
isbn = {978-1-4503-0268-5},
keywords = {field study, motorcycling, vibration patterns, vibro tactile navigation},
location = {Vancouver, BC, Canada},
numpages = {6},
timestamp = {2011.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/bial2011chiea.pdf},
}
M. Langheinrich, N. Memarovic, I. Elhart, and F. Alt, “Autopoiesic Content: A Conceptual Model for Enabling Situated Self-generative Content for Public Displays,” in Proceedings of the first workshop on pervasive urban applications, 2011.
[BibTeX] [Abstract] [Download PDF]
The significant price drops in large LCD panels have led to a massive proliferation of digital public displays in public spaces. Most of these displays, however, simply show some form of traditional advertising, such as short commercials, animated presentations, or still images. Creating content that explicitly takes the particular location and surroundings of a space into account, in order to increase its relevance for passers-by, is typically infeasible due to the high costs associated with customized content. We argue that the concept of autopoiesic content (i.e., self-generative content) could significantly increase the local relevance of such situated public displays without requiring much customization efforts. As a sample application, this position paper outlines the concept and architecture of Funsquare, a large public display system that uses autopoiesic content to facilitate social interaction.
@InProceedings{langheinrich2011purba,
author = {M. Langheinrich AND N. Memarovic AND I. Elhart AND F. Alt},
title = {{Autopoiesic Content: A Conceptual Model for Enabling Situated Self-generative Content for Public Displays}},
booktitle = {Proceedings of the First Workshop on Pervasive Urban Applications},
year = {2011},
series = {PURBA'11},
month = {jun},
note = {langheinrich2011purba},
abstract = {The significant price drops in large LCD panels have led to a massive proliferation of digital public displays in public spaces. Most of these displays, however, simply show some form of traditional advertising, such as short commercials, animated presentations, or still images. Creating content that explicitly takes the particular location and surroundings of a space into account, in order to increase its relevance for passers-by, is typically infeasible due to the high costs associated with customized content. We argue that the concept of autopoiesic content (i.e., self-generative content) could significantly increase the local relevance of such situated public displays without requiring much customization efforts. As a sample application, this position paper outlines the concept and architecture of Funsquare, a large public display system that uses autopoiesic content to facilitate social interaction.},
location = {San Francisco, US},
timestamp = {2011.05.30},
url = {http://www.florian-alt.org/unibw/wp-content/publications/langheinrich2011purba.pdf},
}
J. Müller, F. Alt, and D. Michelis, Pervasive Advertising, Springer London Limited, 2011.
[BibTeX] [Abstract] [Download PDF]
As pervasive computing technologies leave the labs, they are starting to be used for the purpose of advertising. Pervasive Advertising has the potential to affect everyone’s life, but it seems that a knowledge gap is preventing us from shaping this development in a meaningful way. In particular, many marketing and advertising professionals have an expert understanding of their trade, but are unaware of recent advances in pervasive computing technologies, the opportunities they offer, and the challenges they pose. Similarly, many pervasive computing researchers and professionals are on top of the recent technological advances, but lack basic marketing and advertising expertise and therefore an understanding of how their technology can influence these fields. This book is intended to close this gap and provide the means to meaningfully shape the future of pervasive advertising.
@Book{mueller2011perad,
title = {{Pervasive Advertising}},
publisher = {{Springer London Limited}},
year = {2011},
author = {J{\"o}rg M{\"u}ller and Florian Alt and Daniel Michelis},
isbn = {978-0-85729-351-0},
note = {mueller2011perad},
abstract = {As pervasive computing technologies leave the labs, they are starting to be used for the purpose of advertising. Pervasive Advertising has the potential to affect everyone’s life, but it seems that a knowledge gap is preventing us from shaping this development in a meaningful way. In particular, many marketing and advertising professionals have an expert understanding of their trade, but are unaware of recent advances in pervasive computing technologies, the opportunities they offer, and the challenges they pose. Similarly, many pervasive computing researchers and professionals are on top of the recent technological advances, but lack basic marketing and advertising expertise and therefore an understanding of how their technology can influence these fields. This book is intended to close this gap and provide the means to meaningfully shape the future of pervasive advertising.},
bibsource = {DBLP, http://dblp.uni-trier.de},
comment = {978-0-85729-351-0},
ee = {http://dx.doi.org/10.1007/978-0-85729-352-7},
timestamp = {2011.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mueller2011perad.pdf},
}
J. Müller, F. Alt, and D. Michelis, “Introduction to Pervasive Advertising,” in Pervasive advertising, 2011.
[BibTeX] [Abstract] [Download PDF]
As pervasive computing technologies leave the labs, they are starting to be used for the purpose of advertising. Pervasive Advertising has the potential to affect everyone’s life, but it seems that a knowledge gap is preventing us from shaping this development in a meaningful way. In particular, many marketing and advertising professionals have an expert understanding of their trade, but are unaware of recent advances in pervasive computing technologies, the opportunities they offer, and the challenges they pose. Similarly, many pervasive computing researchers and professionals are on top of the recent technological advances, but lack basic marketing and advertising expertise and therefore an understanding of how their technology can influence these fields. This book is intended to close this gap and provide the means to meaningfully shape the future of pervasive advertising.
@InProceedings{mueller2011perad-intro,
author = {J\"{o}rg M\"{u}ller and Florian Alt and Daniel Michelis},
title = {{Introduction to Pervasive Advertising}},
booktitle = {Pervasive Advertising},
year = {2011},
editor = {J\"{o}rg M\"{u}ller and Florian Alt and Daniel Michelis},
month = {sep},
publisher = {Springer London Limited},
note = {mueller2011perad-intro},
abstract = {As pervasive computing technologies leave the labs, they are starting to be used for the purpose of advertising. Pervasive Advertising has the potential to affect everyone’s life, but it seems that a knowledge gap is preventing us from shaping this development in a meaningful way. In particular, many marketing and advertising professionals have an expert understanding of their trade, but are unaware of recent advances in pervasive computing technologies, the opportunities they offer, and the challenges they pose. Similarly, many pervasive computing researchers and professionals are on top of the recent technological advances, but lack basic marketing and advertising expertise and therefore an understanding of how their technology can influence these fields. This book is intended to close this gap and provide the means to meaningfully shape the future of pervasive advertising.},
comment = {978-0-85729-351-0},
owner = {flo},
timestamp = {2011.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mueller2011perad-intro.pdf},
}
N. Memarovic, M. Langheinrich, and F. Alt, “Connecting People through Content – Promoting Community Identity Cognition through People and Places,” in Proceedings of community informatics, 2011.
[BibTeX] [Abstract] [Download PDF]
Large public screens are proliferating in public spaces. Today, most of them are standalone installations that display advertisements in the form of slides, short movies, or still images. However, it is not hard to imagine that these displays will soon be connected through the Internet, thus creating a global and powerful communication medium capable of providing rich, interactive applications. We believe that such a medium has the potential to foster connections within and between communities in public spaces. In this paper we present a research agenda for interacting places, i.e., public spaces that connect communities through public displays. We then report on our initial work in this space, in particular on using public displays for creating what we call identity cognition – increasing the sense of being connected between community members occupying the same space. We have investigated two options for achieving identity cognition: (a) through content that originates from the environment, and (b) through content that originates from people. Content originating from the environment portrays information about a display’s surrounding. For this type of content, identity cognition is usually being achieved implicitly by stimulating the effect of ‘triangulation’, an effect where particularities of the physical space act as links between people. Content originating from people, on the other hand, explicitly achieves identity cognition by promoting community values through content that expresses the attitudes, beliefs, and ideas of individual community members. We have built and deployed two public display applications that support identity cognition using environmentally-sourced content and people-sourced content, respectively.
@InProceedings{memarovic2011cirn,
author = {Nemanja Memarovic and Marc Langheinrich and Florian Alt},
title = {{Connecting People through Content - Promoting Community Identity Cognition through People and Places}},
booktitle = {Proceedings of Community Informatics},
year = {2011},
note = {memarovic2011cirn},
abstract = {Large public screens are proliferating in public spaces. Today, most of them are standalone installations that display advertisements in the form of slides, short movies, or still images. However, it is not hard to imagine that these displays will soon be connected through the Internet, thus creating a global and powerful communication medium capable of providing rich, interactive applications. We believe that such a medium has the potential to foster connections within and between communities in public spaces. In this paper we present a research agenda for interacting places, i.e., public spaces that connect communities through public displays. We then report on our initial work in this space, in particular on using public displays for creating what we call identity cognition – increasing the sense of being connected between community members occupying the same space. We have investigated two options for achieving identity cognition: (a) through content that originates from the environment, and (b) through content that originates from people. Content originating from the environment portrays information about a display’s surrounding. For this type of content, identity cognition is usually being achieved implicitly by stimulating the effect of ‘triangulation’, an effect where particularities of the physical space act as links between people. Content originating from people, on the other hand, explicitly achieves identity cognition by promoting community values through content that expresses the attitudes, beliefs, and ideas of individual community members. We have built and deployed two public display applications that support identity cognition using environmentally-sourced content and people-sourced content, respectively.},
location = {Prato, Italy},
timestamp = {2011.08.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/memarovic2011cirn.pdf},
}
T. Ning, J. Müller, R. Walter, G. Bailly, C. Wacharamanotham, J. Borchers, and F. Alt, “No Need To Stop: Menu Techniques for Passing by Public Displays,” in Proceedings of the CHI workshop on large displays in urban life, Vancouver, BC, Canada, 2011.
[BibTeX] [Abstract] [Download PDF]
Although public displays are increasingly prevalent in public spaces, they are generally not interactive. Menu techniques can enable users to select what is interesting to them. Current touch screen techniques are unsuitable, because for many public displays, users merely pass by and rarely stop. We investigate command selection in this new context of passing-by interaction, in which users only have a few seconds to interact. We present six hands-free gestural techniques and evaluate them in a Wizard-of-Oz experiment. Based on the results of this study, we provide design recommendations for menu selection in passing-by situations.
@InProceedings{ning2011ldul,
author = {Tongyan Ning AND J\"{o}rg M\"{u}ller AND Robert Walter AND Gilles Bailly AND Chatchavan Wacharamanotham AND Jan Borchers AND Florian Alt},
title = {{No Need To Stop: Menu Techniques for Passing by Public Displays}},
booktitle = {Proceedings of the CHI Workshop on Large Displays in Urban Life},
year = {2011},
address = {Vancouver, BC, Canada},
month = {apr},
note = {ning2011ldul},
abstract = {Although public displays are increasingly prevalent in public spaces, they are generally not interactive. Menu techniques can enable users to select what is interesting to them. Current touch screen techniques are unsuitable, because for many public displays, users merely pass by and rarely stop. We investigate command selection in this new context of passing-by interaction, in which users only have a few seconds to interact. We present six hands-free gestural techniques and evaluate them in a Wizard-of-Oz experiment. Based on the results of this study, we provide design recommendations for menu selection in passing-by situations.},
owner = {flo},
timestamp = {2012.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/ning2011ldul.pdf},
}
A. S. Shirazi, T. Kubitza, F. Alt, P. Tarasiewicz, A. Bungert, V. Minakov, and A. Schmidt, “Mobile Context-based Ride Sharing,” in Adjunct proceedings of the ninth international conference on pervasive computing, San Francisco, CA, US, 2011.
[BibTeX] [Abstract] [Download PDF]
When it comes to transportation, especially in densely populated areas, people usually face a trade-off between convenience and costs. Whereas on one hand convenience as a driving factor leads people to prefer using cars, on the other hand air pollution, traffic jams, and high costs due to fuel prices encourage many people (e.g., commuters) to use collective transportation (CT), such as public transport systems. However, CT does not support door-to-door transportation and might be inconvenient due to limited services in off-peak hours or high costs when travelling long distances. A solution growing in popularity is ride sharing, a form of CT making alternative transportation more affordable. In this paper we present a modular platform supporting different forms of ride sharing based on context information. WEtaxi is a system which allows sharing taxis among multiple persons. WEticket supports sharing train tickets by finding additional people going on the same journey.
@InProceedings{sahami2011pervasiveadj,
author = {Alireza Sahami Shirazi AND Thomas Kubitza AND Florian Alt AND Philipp Tarasiewicz AND Andreas Bungert AND Vladimir Minakov AND Albrecht Schmidt},
title = {{Mobile Context-based Ride Sharing}},
booktitle = {Adjunct Proceedings of the Ninth International Conference on Pervasive Computing},
year = {2011},
series = {Pervasive'11},
address = {San Francisco, CA, US},
note = {sahami2011pervasiveadj},
abstract = {When it comes to transportation, especially in densely populated areas, people usually face a trade-off between convenience and costs. Whereas on one hand convenience as a driving factor leads people to prefer using cars, on the other hand air pollution, traffic jams, and high costs due to fuel prices encourage many people (e.g., commuters) to use collective transportation (CT), such as public transport systems. However, CT does not support door-to-door transportation and might be inconvenient due to limited services in off-peak hours or high costs when travelling long distances. A solution growing in popularity is ride sharing, a form of CT making alternative transportation more affordable. In this paper we present a modular platform supporting different forms of ride sharing based on context information. WEtaxi is a system which allows sharing taxis among multiple persons. WEticket supports sharing train tickets by finding additional people going on the same journey.},
owner = {flo},
timestamp = {2011.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/sahami2011pervasiveadj.pdf},
}

2010

F. Alt, D. Kern, F. Schulte, B. Pfleging, A. S. Shirazi, and A. Schmidt, “Enabling micro-entertainment in vehicles based on context information,” in Proceedings of the 2nd international conference on automotive user interfaces and interactive vehicular applications, New York, NY, USA, 2010, p. 117–124. doi:10.1145/1969773.1969794
[BibTeX] [Abstract] [Download PDF]
People spend a significant amount of time in their cars (US: 86 minutes/day, Europe: 43 minutes/day) while commuting, shopping, or traveling. Hence, the variety of entertainment in the car increases, and many vehicles are already equipped with displays, allowing for watching news, videos, accessing the Internet, or playing games. At the same time, the urbanization caused a massive increase of traffic volume, which led to people spending an ever-increasing amount of their time in front of red traffic lights. An observation of the prevailing forms of entertainment in the car reveals that content such as text, videos, or games are often a mere adaptation of content produced for television, public displays, PCs, or mobile phones and do not adapt to the situation in the car. In this paper we report on a web survey assessing which forms of entertainment and which types of content are considered to be useful for in-car entertainment by drivers. We then introduce an algorithm, which is capable of learning standing times in front of traffic lights based on GPS information only. This, on one hand, allows for providing content of appropriate length, on the other hand, for directing the attention of the driver back towards the street at the right time. Finally, we present a prototype implementation and a qualitative evaluation.
@InProceedings{alt2010autoui,
author = {Alt, Florian and Kern, Dagmar and Schulte, Fabian and Pfleging, Bastian and Shirazi, Alireza Sahami and Schmidt, Albrecht},
title = {Enabling Micro-entertainment in Vehicles Based on Context Information},
booktitle = {Proceedings of the 2nd International Conference on Automotive User Interfaces and Interactive Vehicular Applications},
year = {2010},
series = {AutomotiveUI '10},
pages = {117--124},
address = {New York, NY, USA},
publisher = {ACM},
note = {alt2010autoui},
abstract = {People spend a significant amount of time in their cars (US: 86 minutes/day, Europe: 43 minutes/day) while commuting, shopping, or traveling. Hence, the variety of entertainment in the car increases, and many vehicles are already equipped with displays, allowing for watching news, videos, accessing the Internet, or playing games. At the same time, the urbanization caused a massive increase of traffic volume, which led to people spending an ever-increasing amount of their time in front of red traffic lights. An observation of the prevailing forms of entertainment in the car reveals that content such as text, videos, or games are often a mere adaptation of content produced for television, public displays, PCs, or mobile phones and do not adapt to the situation in the car. In this paper we report on a web survey assessing which forms of entertainment and which types of content are considered to be useful for in-car entertainment by drivers. We then introduce an algorithm, which is capable of learning standing times in front of traffic lights based on GPS information only. This, on one hand, allows for providing content of appropriate length, on the other hand, for directing the attention of the driver back towards the street at the right time. Finally, we present a prototype implementation and a qualitative evaluation.},
acmid = {1969794},
doi = {10.1145/1969773.1969794},
isbn = {978-1-4503-0437-5},
keywords = {GPS, context, micro entertainment, vehicle},
location = {Pittsburgh, Pennsylvania},
numpages = {8},
timestamp = {2010.11.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2010autoui.pdf},
}
F. Alt, A. S. Shirazi, S. Legien, A. Schmidt, and J. Mennenöh, “Creating Meaningful Melodies from Text Messages,” in Proceedings of the 2010 conference on new interfaces for musical expression, 2010, p. 63–68.
[BibTeX] [Abstract] [Download PDF]
Writing text messages (e.g. email, SMS, instant messaging) is a popular form of synchronous and asynchronous communication. However, when it comes to notifying users about new messages, current audio-based approaches, such as notification tones, are very limited in conveying information. In this paper we show how entire text messages can be encoded into a meaningful and euphonic melody in such a way that users can guess a message’s intention without actually seeing the content. First, as a proof of concept, we report on the findings of an initial online survey among 37 musicians and 32 non-musicians evaluating the feasibility and validity of our approach. We show that our representation is understandable and that there are no significant differences between musicians and non-musicians. Second, we evaluated the approach in a real world scenario based on a Skype plug-in. In a field study with 14 participants we showed that sonified text messages strongly impact the users’ message checking behavior by significantly reducing the time between receiving and reading an incoming message.
@InProceedings{alt2010nime,
author = {Alt, F. and Shirazi, A.S. and Legien, S. and Schmidt, A. and Mennen{\"o}h, J.},
title = {{Creating Meaningful Melodies from Text Messages}},
booktitle = {Proceedings of the 2010 Conference on New Interfaces for Musical Expression},
year = {2010},
series = {NIME'10},
pages = {63--68},
month = {jun},
note = {alt2010nime},
abstract = {Writing text messages (e.g. email, SMS, instant messaging) is a popular form of synchronous and asynchronous communication. However, when it comes to notifying users about new messages, current audio-based approaches, such as notification tones, are very limited in conveying information. In this paper we show how entire text messages can be encoded into a meaningful and euphonic melody in such a way that users can guess a message’s intention without actually seeing the content. First, as a proof of concept, we report on the findings of an initial online survey among 37 musicians and 32 non-musicians evaluating the feasibility and validity of our approach. We show that our representation is understandable and that there are no significant differences between musicians and non-musicians. Second, we evaluated the approach in a real world scenario based on a Skype plug-in. In a field study with 14 participants we showed that sonified text messages strongly impact the users’ message checking behavior by significantly reducing the time between receiving and reading an incoming message.},
timestamp = {2010.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2010nime.pdf},
}
F. Alt, A. S. Shirazi, A. Kaiser, K. Pfeuffer, E. Gurkan, A. Schmidt, P. Holleis, and M. Wagner, “Exploring Ambient Visualizations of Context Information,” in Adjunct proceedings of the eighth annual IEEE international conference on pervasive computing and communications, Mannheim, Germany, 2010, pp. 788-791.
[BibTeX] [Abstract] [Download PDF]
In this paper we investigate how ambient displays can be used to share context information. Currently, many personal devices provide context information, such as location or activity, and at the same time the number of ambient displays is increasing. We developed two prototypes for visualizing contextual information and initially explored the suitability of these in an online study. Additionally, we investigated which parameters are important for users when sharing personal context. Based on our findings we discuss guidelines for the design of ambient displays for context sharing.
@InProceedings{alt2010percomadj,
author = {Florian Alt and Alireza Sahami Shirazi and Andreas Kaiser and Ken Pfeuffer and Emre Gurkan and Albrecht Schmidt and Paul Holleis and Matthias Wagner},
title = {{Exploring Ambient Visualizations of Context Information}},
booktitle = {Adjunct Proceedings of the Eighth Annual IEEE International Conference on Pervasive Computing and Communications},
year = {2010},
series = {PerCom'10},
pages = {788-791},
address = {Mannheim, Germany},
month = {apr},
publisher = {IEEE},
note = {alt2010percomadj},
abstract = {In this paper we investigate how ambient displays can be used to share context information. Currently, many personal devices provide context information, such as location or activity, and at the same time the number of ambient displays is increasing. We developed two prototypes for visualizing contextual information and initially explored the suitability of these in an online study. Additionally, we investigated which parameters are important for users when sharing personal context. Based on our findings we discuss guidelines for the design of ambient displays for context sharing.},
bibsource = {DBLP, http://dblp.uni-trier.de},
ee = {http://dx.doi.org/10.1109/PERCOMW.2010.5470542},
timestamp = {2010.04.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2010percomadj.pdf},
}
F. Alt, A. S. Shirazi, A. Schmidt, U. Kramer, and Z. Nawaz, “Location-based Crowdsourcing: Extending Crowdsourcing to the Real World,” in Proceedings of the sixth nordic conference on human-computer interaction: extending boundaries, New York, NY, USA, 2010, p. 13–22. doi:10.1145/1868914.1868921
[BibTeX] [Abstract] [Download PDF]
The WWW and the mobile phone have become an essential means for sharing implicitly and explicitly generated information and a communication platform for many people. With the increasing ubiquity of location sensing included in mobile devices we investigate the arising opportunities for mobile crowdsourcing making use of the real world context. In this paper we assess how the idea of user-generated content, web-based crowdsourcing, and mobile electronic coordination can be combined to extend crowdsourcing beyond the digital domain and link it to tasks in the real world. To explore our concept we implemented a crowdsourcing platform that integrates location as a parameter for distributing tasks to workers. In the paper we describe the concept and design of the platform and discuss the results of two user studies. Overall the findings show that integrating tasks in the physical world is useful and feasible. We observed that (1) mobile workers prefer to pull tasks rather than getting them pushed, (2) requests for pictures were the most favored tasks, and (3) users tended to solve tasks mainly in close proximity to their homes. Based on this, we discuss issues that should be considered during designing mobile crowdsourcing applications.
@InProceedings{alt2010nordichi,
author = {Alt, Florian and Shirazi, Alireza Sahami and Schmidt, Albrecht and Kramer, Urs and Nawaz, Zahid},
title = {{Location-based Crowdsourcing: Extending Crowdsourcing to the Real World}},
booktitle = {Proceedings of the Sixth Nordic Conference on Human-Computer Interaction: Extending Boundaries},
year = {2010},
series = {NordiCHI '10},
pages = {13--22},
address = {New York, NY, USA},
month = {oct},
publisher = {ACM},
note = {alt2010nordichi},
abstract = {The WWW and the mobile phone have become an essential means for sharing implicitly and explicitly generated information and a communication platform for many people. With the increasing ubiquity of location sensing included in mobile devices we investigate the arising opportunities for mobile crowdsourcing making use of the real world context. In this paper we assess how the idea of user-generated content, web-based crowdsourcing, and mobile electronic coordination can be combined to extend crowdsourcing beyond the digital domain and link it to tasks in the real world. To explore our concept we implemented a crowdsourcing platform that integrates location as a parameter for distributing tasks to workers. In the paper we describe the concept and design of the platform and discuss the results of two user studies. Overall the findings show that integrating tasks in the physical world is useful and feasible. We observed that (1) mobile workers prefer to pull tasks rather than getting them pushed, (2) requests for pictures were the most favored tasks, and (3) users tended to solve tasks mainly in close proximity to their homes. Based on this, we discuss issues that should be considered during designing mobile crowdsourcing applications.},
acmid = {1868921},
doi = {10.1145/1868914.1868921},
isbn = {978-1-60558-934-3},
keywords = {context, crowdsourcing, location, mobile phone},
location = {Reykjavik, Iceland},
numpages = {10},
timestamp = {2010.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2010nordichi.pdf},
}
G. Beyer, F. Alt, S. Klose, K. Isakovic, A. S. Shirazi, and A. Schmidt, “Design Space for Large Cylindrical Screens,” in Proceedings of the Third International Workshop on Pervasive Advertising and Shopping, Helsinki, Finland, 2010.
[BibTeX] [Abstract] [Download PDF]
The era of modern cylindrical screens, so-called advertising columns, began in the middle of the 19th century. Even nowadays they are still a popular advertising medium, which integrates well with urban environments. With advances in display technologies (LEDs, projectors) digital forms of such columns emerge and enable novel forms of visualization and interaction, which significantly differ from flat, rectangular screens due to the round shape. In this paper we present the design space for large cylindrical screens and outline design principles based on observations and experiments with a prototype of a digital column. We especially focus on the differences with flat, rectangular displays and report on challenges related to the deployment and development of applications for cylindrical screens.
@InProceedings{beyer2010perad,
author = {Gilbert Beyer AND Florian Alt AND Stefan Klose AND Karsten Isakovic AND Alireza Sahami Shirazi AND Albrecht Schmidt},
title = {{Design Space for Large Cylindrical Screens}},
booktitle = {Proceedings of the Third International Workshop on Pervasive Advertising and Shopping},
year = {2010},
series = {PerAd'10},
address = {Helsinki, Finland},
month = {jun},
note = {beyer2010perad},
abstract = {The era of modern cylindrical screens, so-called advertising columns, began in the middle of the 19th century. Even nowadays they are still a popular advertising medium, which integrates well with urban environments. With advances in display technologies (LEDs, projectors) digital forms of such columns emerge and enable novel forms of visualization and interaction, which significantly differ from flat, rectangular screens due to the round shape. In this paper we present the design space for large cylindrical screens and outline design principles based on observations and experiments with a prototype of a digital column. We especially focus on the differences with flat, rectangular displays and report on challenges related to the deployment and development of applications for cylindrical screens.},
owner = {flo},
timestamp = {2010.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/beyer2010perad.pdf},
}
A. Müller, A. S. Shirazi, F. Alt, and A. Schmidt, “ZoneTrak: Design and Implementation of an Emergency Management Assistance System,” in Adjunct Proceedings of the Eighth International Conference on Pervasive Computing, Helsinki, Finland, 2010.
[BibTeX] [Abstract] [Download PDF]
Though pervasive computing technologies are omnipresent in our daily lives, emergency cases, such as earthquakes or fires, often cause serious damage to the underlying infrastructure. In such cases rescue units rely on paper maps of the operation areas, and important information is broadcast either from a central unit or from other teams. This information is manually updated on the paper map, not only causing a lot of work but also being a potential source of errors. In this research we implemented a system that provides a positioning system to track forces and allows sharing information in real time. Rescue units can annotate different zones and broadcast data to other units, whose maps are automatically updated with available annotations. We show how such a system can be operated based on an independent infrastructure, which makes it robust and reliable in emergency and catastrophe situations.
@InProceedings{mueller2010pevasiveadj,
author = {Alexander M\"{u}ller AND Alireza Sahami Shirazi AND Florian Alt AND Albrecht Schmidt},
title = {{ZoneTrak: Design and Implementation of an Emergency Management Assistance System}},
booktitle = {Adjunct Proceedings of the Eighth International Conference on Pervasive Computing},
year = {2010},
series = {Pervasive'10},
address = {Helsinki, Finland},
note = {mueller2010pevasiveadj},
abstract = {Though pervasive computing technologies are omnipresent in our daily lives, emergency cases, such as earthquakes or fires, often cause serious damage to the underlying infrastructure. In such cases rescue units rely on paper maps of the operation areas, and important information is broadcast either from a central unit or from other teams. This information is manually updated on the paper map, not only causing a lot of work but also being a potential source of errors. In this research we implemented a system that provides a positioning system to track forces and allows sharing information in real time. Rescue units can annotate different zones and broadcast data to other units, whose maps are automatically updated with available annotations. We show how such a system can be operated based on an independent infrastructure, which makes it robust and reliable in emergency and catastrophe situations.},
owner = {flo},
timestamp = {2010.06.02},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mueller2010pevasiveadj.pdf},
}
J. Müller, F. Alt, D. Michelis, and A. Schmidt, “Requirements and Design Space for Interactive Public Displays,” in Proceedings of the International Conference on Multimedia, New York, NY, USA, 2010, p. 1285–1294. doi:10.1145/1873951.1874203
[BibTeX] [Abstract] [Download PDF]
Digital immersion is moving into public space. Interactive screens and public displays are deployed in urban environments, malls, and shop windows. Inner city areas, airports, train stations and stadiums are experiencing a transformation from traditional to digital displays enabling new forms of multimedia presentation and new user experiences. Imagine a walkway with digital displays that allows a user to immerse herself in her favorite content while moving through public space. In this paper we discuss the fundamentals for creating exciting public displays and multimedia experiences enabling new forms of engagement with digital content. Interaction in public space and with public displays can be categorized in phases, each having specific requirements. Attracting, engaging and motivating the user are central design issues that are addressed in this paper. We provide a comprehensive analysis of the design space explaining mental models and interaction modalities, and from this analysis we derive a taxonomy for interactive public displays. Our analysis and the taxonomy are grounded in a large number of research projects, art installations and experience. With our contribution we aim at providing a comprehensive guide for designers and developers of interactive multimedia on public displays.
@InProceedings{mueller2010mm,
author = {M\"{u}ller, J\"{o}rg and Alt, Florian and Michelis, Daniel and Schmidt, Albrecht},
title = {{Requirements and Design Space for Interactive Public Displays}},
booktitle = {{Proceedings of the International Conference on Multimedia}},
year = {2010},
series = {MM'10},
pages = {1285--1294},
address = {New York, NY, USA},
publisher = {ACM},
note = {mueller2010mm},
abstract = {Digital immersion is moving into public space. Interactive screens and public displays are deployed in urban environments, malls, and shop windows. Inner city areas, airports, train stations and stadiums are experiencing a transformation from traditional to digital displays enabling new forms of multimedia presentation and new user experiences. Imagine a walkway with digital displays that allows a user to immerse herself in her favorite content while moving through public space. In this paper we discuss the fundamentals for creating exciting public displays and multimedia experiences enabling new forms of engagement with digital content. Interaction in public space and with public displays can be categorized in phases, each having specific requirements. Attracting, engaging and motivating the user are central design issues that are addressed in this paper. We provide a comprehensive analysis of the design space explaining mental models and interaction modalities, and from this analysis we derive a taxonomy for interactive public displays. Our analysis and the taxonomy are grounded in a large number of research projects, art installations and experience. With our contribution we aim at providing a comprehensive guide for designers and developers of interactive multimedia on public displays.},
acmid = {1874203},
doi = {10.1145/1873951.1874203},
isbn = {978-1-60558-933-6},
keywords = {design space, interaction, public displays, requirements},
location = {Firenze, Italy},
numpages = {10},
timestamp = {2010.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mueller2010mm.pdf},
}
J. Mennenöh, S. Kristes, F. Alt, A. S. Shirazi, A. Schmidt, and H. Schröder, “Customer Touchpoints im stationären Einzelhandel – Potenzial von Pervasive Computing,” Marketing Review St. Gallen, vol. 27, iss. 2, p. 37–42, 2010.
[BibTeX] [Abstract] [Download PDF]
The more individualized services customers demand, the more information providers need. While distance-selling retailers have personalized data about their customers and, above all in the online shop, data about their browsing behaviour, brick-and-mortar retail still has considerable data gaps. These gaps can be closed with a pervasive computing environment: new customer touchpoints provide information about who shops at a store and how purchases are carried out.
@Article{mennenoeh2010mrsg,
author = {Mennen\"{o}h, Julian and Kristes, Stefanie and Alt, Florian and Shirazi, Alireza Sahami and Schmidt, Albrecht and Schr\"{o}der, Hendrik},
title = {{Customer Touchpoints im station\"{a}ren Einzelhandel -- Potenzial von Pervasive Computing}},
journal = {Marketing Review St. Gallen},
year = {2010},
volume = {27},
number = {2},
pages = {37--42},
note = {mennenoeh2010mrsg},
abstract = {The more individualized services customers demand, the more information providers need. While distance-selling retailers have personalized data about their customers and, above all in the online shop, data about their browsing behaviour, brick-and-mortar retail still has considerable data gaps. These gaps can be closed with a pervasive computing environment: new customer touchpoints provide information about who shops at a store and how purchases are carried out.},
publisher = {Springer},
timestamp = {2010.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/mennenoeh2010mrsg.pdf},
}
J. D. H. Ramos, A. Tabard, and F. Alt, “Contextual-Analysis for Infrastructure Awareness Systems,” in Proceedings of the CHI Workshop “Bridging the Gap: Moving from Contextual Analysis to Design”, Atlanta, GA, USA, 2010.
[BibTeX] [Download PDF]
@InProceedings{ramos2010chiws,
author = {Juan David Hincapie Ramos AND Aurelien Tabard AND Florian Alt},
title = {{Contextual-Analysis for Infrastructure Awareness Systems}},
booktitle = {Proceedings of the CHI Workshop ``Bridging the Gap: Moving from Contextual Analysis to Design''},
year = {2010},
address = {Atlanta, GA, USA},
note = {ramos2010chiws},
owner = {flo},
timestamp = {2010.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/},
}
I. Reif, F. Alt, J. D. Hincapié Ramos, K. Poteriaykina, and J. Wagner, “Cleanly: Trashducation Urban System,” in CHI ’10 Extended Abstracts on Human Factors in Computing Systems, New York, NY, USA, 2010, p. 3511–3516. doi:10.1145/1753846.1754010
[BibTeX] [Abstract] [Download PDF]
Half the world’s population is expected to live in urban areas by 2020. The high human density and changes in peoples’ consumption habits result in an ever-increasing amount of trash that must be handled by governing bodies. Problems created by inefficient or dysfunctional cleaning services are exacerbated by a poor personal trash management culture. In this paper we present Cleanly, an urban trashducation system aimed at creating awareness of garbage production and management, which may serve as an educational platform in the urban environment. We report on data collected from an online survey, which not only motivates our research but also provides useful information on reasons and possible solutions for trash problems.
@InProceedings{reif2010chiea,
author = {Reif, Inbal and Alt, Florian and Hincapi{\'e} Ramos, Juan David and Poteriaykina, Katerina and Wagner, Johannes},
title = {{Cleanly: Trashducation Urban System}},
booktitle = {CHI '10 Extended Abstracts on Human Factors in Computing Systems},
year = {2010},
series = {CHI EA'10},
pages = {3511--3516},
address = {New York, NY, USA},
publisher = {ACM},
note = {reif2010chiea},
abstract = {Half the world's population is expected to live in urban areas by 2020. The high human density and changes in peoples' consumption habits result in an ever-increasing amount of trash that must be handled by governing bodies. Problems created by inefficient or dysfunctional cleaning services are exacerbated by a poor personal trash management culture. In this paper we present Cleanly, an urban trashducation system aimed at creating awareness of garbage production and management, which may serve as an educational platform in the urban environment. We report on data collected from an online survey, which not only motivates our research but also provides useful information on reasons and possible solutions for trash problems.},
acmid = {1754010},
doi = {10.1145/1753846.1754010},
isbn = {978-1-60558-930-5},
keywords = {design, interaction, public displays, recycling, rfid badges, trashducation, ubiquitous display environments},
location = {Atlanta, Georgia, USA},
numpages = {6},
timestamp = {2010.05.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/reif2010chiea.pdf},
}
A. Sahami Shirazi, A. Sarjanoja, F. Alt, A. Schmidt, and J. Häkkilä, “Understanding the Impact of Abstracted Audio Preview of SMS,” in Proceedings of the 28th International Conference on Human Factors in Computing Systems, New York, NY, USA, 2010, p. 1735–1738. doi:10.1145/1753326.1753585
[BibTeX] [Abstract] [Download PDF]
Despite the availability of other mobile messaging applications, SMS has kept its position as a heavily used communication technology. However, there are many situations in which it is inconvenient or inappropriate to check a message’s content immediately. In this paper, we introduce the concept of audio previews of SMS. Based on a real-time analysis of the content of a message, we provide auditory cues in addition to the notification tone upon receiving an SMS. We report on a field trial with 20 participants and show that the use of audio-enhanced SMS affects the reading and writing behavior of users. Our work is motivated by the results of an online survey among 347 SMS users, from whom we analyzed 3400 text messages.
@InProceedings{sahami2010chi,
author = {Sahami Shirazi, Alireza and Sarjanoja, Ari-Heikki and Alt, Florian and Schmidt, Albrecht and H\"{a}kkil\"{a}, Jonna},
title = {{Understanding the Impact of Abstracted Audio Preview of SMS}},
booktitle = {Proceedings of the 28th International Conference on Human Factors in Computing Systems},
year = {2010},
series = {CHI'10},
pages = {1735--1738},
address = {New York, NY, USA},
publisher = {ACM},
note = {sahami2010chi},
abstract = {Despite the availability of other mobile messaging applications, SMS has kept its position as a heavily used communication technology. However, there are many situations in which it is inconvenient or inappropriate to check a message's content immediately. In this paper, we introduce the concept of audio previews of SMS. Based on a real-time analysis of the content of a message, we provide auditory cues in addition to the notification tone upon receiving an SMS. We report on a field trial with 20 participants and show that the use of audio-enhanced SMS affects the reading and writing behavior of users. Our work is motivated by the results of an online survey among 347 SMS users, from whom we analyzed 3400 text messages.},
acmid = {1753585},
doi = {10.1145/1753326.1753585},
isbn = {978-1-60558-929-9},
keywords = {auditory ui, emoticon, mobile phone, sms, user studies},
location = {Atlanta, Georgia, USA},
numpages = {4},
timestamp = {2010.01.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/sahami2010chi.pdf},
}
A. S. Shirazi, T. Kubitza, F. Alt, B. Pfleging, and A. Schmidt, “WEtransport: A Context-based Ride Sharing Platform,” in Adjunct Proceedings of the Twelfth International Conference on Ubiquitous Computing, Copenhagen, Denmark, 2010.
[BibTeX] [Abstract] [Download PDF]
In densely populated urban areas high amounts of traffic pose a major problem, which affects the environment, economy, and our lives. From a user’s perspective, the main issues include delays due to traffic jams, lack of parking space, and high costs due to increasing fuel prices (e.g., if commuting long distances). Collective transportation (CT), e.g., public transport systems, provides a partial solution to these issues. Yet, CT does not support door-to-door transportation, hence reducing convenience; it might be limited in off-peak hours, and it is still a cost factor when travelling long distances. A solution to these issues is ride sharing, an evolving form of CT making alternative transportation more affordable. In this paper we present a modular, context-aware ride sharing platform. We aim at enhancing convenience, reliability, and affordability of different forms of ride sharing by means of context data. Additionally, our approach supports an easy server- and client-side expansion due to the modular platform structure.
@InProceedings{sahami2010ubicompadj,
author = {Alireza Sahami Shirazi AND Thomas Kubitza AND Florian Alt AND Bastian Pfleging AND Albrecht Schmidt},
title = {{WEtransport: A Context-based Ride Sharing Platform}},
booktitle = {Adjunct Proceedings of the Twelfth International Conference on Ubiquitous Computing},
year = {2010},
series = {Ubicomp'10},
address = {Copenhagen, Denmark},
note = {sahami2010ubicompadj},
abstract = {In densely populated urban areas high amounts of traffic pose a major problem, which affects the environment, economy, and our lives. From a user’s perspective, the main issues include delays due to traffic jams, lack of parking space, and high costs due to increasing fuel prices (e.g., if commuting long distances). Collective transportation (CT), e.g., public transport systems, provides a partial solution to these issues. Yet, CT does not support door-to-door transportation, hence reducing convenience; it might be limited in off-peak hours, and it is still a cost factor when travelling long distances. A solution to these issues is ride sharing, an evolving form of CT making alternative transportation more affordable. In this paper we present a modular, context-aware ride sharing platform. We aim at enhancing convenience, reliability, and affordability of different forms of ride sharing by means of context data. Additionally, our approach supports an easy server- and client-side expansion due to the modular platform structure.},
owner = {flo},
timestamp = {2010.03.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/sahami2010ubicompadj.pdf},
}

2009

F. Alt, M. Balz, S. Kristes, A. S. Shirazi, J. Mennenöh, A. Schmidt, H. Schröder, and M. Gödicke, “Adaptive User Profiles in Pervasive Advertising Environments,” in Proceedings of the European Conference on Ambient Intelligence, Berlin, Heidelberg, 2009, p. 276–286. doi:10.1007/978-3-642-05408-2_32
[BibTeX] [Abstract] [Download PDF]
Nowadays modern advertising environments try to provide more efficient ads by targeting customers based on their interests. Various approaches exist today as to how information about the users’ interests can be gathered. Users can deliberately and explicitly provide this information, or users’ shopping behaviors can be analyzed implicitly. We implemented an advertising platform to simulate an advertising environment and present adaptive profiles, which let users set up profiles based on a self-assessment, and enhance those profiles with information about their real shopping behavior as well as about their activity intensity. Additionally, we explain how pervasive technologies such as Bluetooth can be used to create a profile anonymously and unobtrusively.
@InProceedings{alt2009ami,
author = {Alt, Florian and Balz, Moritz and Kristes, Stefanie and Shirazi, Alireza Sahami and Mennen\"{o}h, Julian and Schmidt, Albrecht and Schr\"{o}der, Hendrik and G\"{o}dicke, Michael},
title = {{Adaptive User Profiles in Pervasive Advertising Environments}},
booktitle = {Proceedings of the European Conference on Ambient Intelligence},
year = {2009},
series = {AmI'09},
pages = {276--286},
address = {Berlin, Heidelberg},
month = {nov},
publisher = {Springer-Verlag},
note = {alt2009ami},
abstract = {Nowadays modern advertising environments try to provide more efficient ads by targeting customers based on their interests. Various approaches exist today as to how information about the users’ interests can be gathered. Users can deliberately and explicitly provide this information, or users’ shopping behaviors can be analyzed implicitly. We implemented an advertising platform to simulate an advertising environment and present adaptive profiles, which let users set up profiles based on a self-assessment, and enhance those profiles with information about their real shopping behavior as well as about their activity intensity. Additionally, we explain how pervasive technologies such as Bluetooth can be used to create a profile anonymously and unobtrusively.},
acmid = {1694666},
doi = {10.1007/978-3-642-05408-2_32},
isbn = {978-3-642-05407-5},
location = {Salzburg, Austria},
numpages = {11},
timestamp = {2009.11.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2009ami.pdf},
}
F. Alt, C. Evers, and A. Schmidt, “Mobile Public Display Systems,” in Adjunct Proceedings of the Tenth Workshop on Mobile Computing, Systems, and Applications, Santa Cruz, CA, USA, 2009.
[BibTeX] [Download PDF]
@InProceedings{alt2009hotmobileadj,
author = {Florian Alt AND Christoph Evers AND Albrecht Schmidt},
title = {{Mobile Public Display Systems}},
booktitle = {Adjunct Proceedings of the Tenth Workshop on Mobile Computing, Systems, and Applications},
year = {2009},
series = {HotMobile'09},
address = {Santa Cruz, CA, USA},
month = {jun},
note = {alt2009hotmobileadj},
owner = {flo},
timestamp = {2009.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2009hotmobileadj.pdf},
}
F. Alt, C. Evers, and A. Schmidt, “Users’ View on Context-Sensitive Car Advertisements,” in Proceedings of the 7th International Conference on Pervasive Computing, Berlin, Heidelberg, 2009, p. 9–16. doi:10.1007/978-3-642-01516-8_2
[BibTeX] [Abstract] [Download PDF]
Cars are ubiquitous and offer large and often highly visible surfaces that can be used as advertising space. Until now, advertising in this domain has focused on commercial vehicles, and advertisements have been painted on and were therefore static, with the exception of car-mounted displays that offer dynamic content. With new display technologies, we expect static displays or uniformly-painted surfaces (e.g. onto car doors or the sides of vans and trucks) to be replaced with embedded dynamic displays. We also see an opportunity for advertisements to be placed on non-commercial cars: results of our online survey with 187 drivers show that more than half of them have an interest in displaying advertising on their cars under two conditions: (1) they will receive financial compensation, and (2) there will be a means for them to influence the type of advertisements shown. Based on these findings, as well as further interviews with car owners and a car fleet manager, we discuss the requirements for a context-aware advertising platform, including a context-advertising editor and contextual content distribution system. We describe an implementation of the system that includes components for car owners to describe their preferences and for advertisers to contextualize their ad content and distribution mechanism.
@InProceedings{alt2009pervasive,
author = {Alt, Florian and Evers, Christoph and Schmidt, Albrecht},
title = {{Users' View on Context-Sensitive Car Advertisements}},
booktitle = {Proceedings of the 7th International Conference on Pervasive Computing},
year = {2009},
series = {Pervasive'09},
pages = {9--16},
address = {Berlin, Heidelberg},
month = {jun},
publisher = {Springer-Verlag},
note = {alt2009pervasive},
abstract = {Cars are ubiquitous and offer large and often highly visible surfaces that can be used as advertising space. Until now, advertising in this domain has focused on commercial vehicles, and advertisements have been painted on and were therefore static, with the exception of car-mounted displays that offer dynamic content. With new display technologies, we expect static displays or uniformly-painted surfaces (e.g. onto car doors or the sides of vans and trucks) to be replaced with embedded dynamic displays. We also see an opportunity for advertisements to be placed on non-commercial cars: results of our online survey with 187 drivers show that more than half of them have an interest in displaying advertising on their cars under two conditions: (1) they will receive financial compensation, and (2) there will be a means for them to influence the type of advertisements shown. Based on these findings, as well as further interviews with car owners and a car fleet manager, we discuss the requirements for a context-aware advertising platform, including a context-advertising editor and contextual content distribution system. We describe an implementation of the system that includes components for car owners to describe their preferences and for advertisers to contextualize their ad content and distribution mechanism.},
acmid = {1560007},
doi = {10.1007/978-3-642-01516-8_2},
isbn = {978-3-642-01515-1},
location = {Nara, Japan},
numpages = {8},
timestamp = {2009.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2009pervasive.pdf},
}
F. Alt, A. Schmidt, R. Atterer, and P. Holleis, “Bringing Web 2.0 to the Old Web: A Platform for Parasitic Applications,” in Proceedings of the 12th IFIP TC 13 International Conference on Human-Computer Interaction: Part I, Berlin, Heidelberg, 2009, p. 405–418. doi:10.1007/978-3-642-03655-2_44
[BibTeX] [Abstract] [Download PDF]
It is possible to create interactive, responsive web applications that allow user-generated contributions. However, the relevant technologies have to be explicitly deployed by the authors of the web pages. In this work we present the concept of parasitic and symbiotic web applications which can be deployed on arbitrary web pages by means of a proxy-based application platform. Such applications are capable of inserting, editing and deleting the content of web pages. We use an HTTP proxy in order to insert JavaScript code on each web page that is delivered from the web server to the browser. Additionally we use a database server hosting user-generated scripts as well as high-level APIs allowing for implementing customized web applications. Our approach is capable of cooperating with existing web pages by using shared standards (e.g. formatting of the structure on DOM level) and common APIs but also allows for user-generated (parasitic) applications on arbitrary web pages without the need for cooperation by the page owner.
@InProceedings{alt2009interact,
author = {Alt, Florian and Schmidt, Albrecht and Atterer, Richard and Holleis, Paul},
title = {{Bringing Web 2.0 to the Old Web: A Platform for Parasitic Applications}},
booktitle = {Proceedings of the 12th IFIP TC 13 International Conference on Human-Computer Interaction: Part I},
year = {2009},
series = {INTERACT'09},
pages = {405--418},
address = {Berlin, Heidelberg},
month = {sep},
publisher = {Springer-Verlag},
note = {alt2009interact},
abstract = {It is possible to create interactive, responsive web applications that allow user-generated contributions. However, the relevant technologies have to be explicitly deployed by the authors of the web pages. In this work we present the concept of parasitic and symbiotic web applications which can be deployed on arbitrary web pages by means of a proxy-based application platform. Such applications are capable of inserting, editing and deleting the content of web pages. We use an HTTP proxy in order to insert JavaScript code on each web page that is delivered from the web server to the browser. Additionally we use a database server hosting user-generated scripts as well as high-level APIs allowing for implementing customized web applications. Our approach is capable of cooperating with existing web pages by using shared standards (e.g. formatting of the structure on DOM level) and common APIs but also allows for user-generated (parasitic) applications on arbitrary web pages without the need for cooperation by the page owner.},
acmid = {1615858},
doi = {10.1007/978-3-642-03655-2_44},
isbn = {978-3-642-03654-5},
location = {Uppsala, Sweden},
numpages = {14},
timestamp = {2009.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2009interact.pdf},
}
F. Alt, A. Schmidt, and C. Evers, “Mobile Contextual Displays,” in Proceedings of the First International Workshop on Pervasive Advertising, Nara, Japan, 2009.
[BibTeX] [Download PDF]
@InProceedings{alt2009perad1,
author = {Florian Alt AND Albrecht Schmidt AND Christoph Evers},
title = {{Mobile Contextual Displays}},
booktitle = {Proceedings of the First International Workshop on Pervasive Advertising},
year = {2009},
series = {PerAd'09},
address = {Nara, Japan},
month = {jun},
note = {alt2009perad1},
owner = {flo},
timestamp = {2009.06.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2009perad1.pdf},
}
F. Alt, A. S. Shirazi, M. Pfeiffer, P. Holleis, and A. Schmidt, “TaxiMedia: An Interactive Context-Aware Entertainment and Advertising System,” in Proceedings of the Second International Workshop on Pervasive Advertising, Lübeck, Germany, 2009.
[BibTeX] [Abstract] [Download PDF]
The use of public transport vehicles, such as trams, buses, and taxis, as an advertising space has been increasing for several years. However, mainly the outside of the vehicles is used to show advertisements using paintings, foil, or roof-mounted displays. Nowadays, with advances in display technologies, small high-resolution displays can be easily embedded in vehicles and be used for entertainment or advertising purposes. In this paper we introduce an interactive context-aware advertising system designed for cabs, which is targeted to offer context-aware information such as advertisements, points of interest, events, etc. during a cab ride. Additionally, it is possible for advertisers to upload their contents and define areas where their advertisements should be shown.
@InProceedings{alt2009perad2,
author = {Florian Alt AND Alireza Sahami Shirazi AND Max Pfeiffer AND Paul Holleis AND Albrecht Schmidt},
title = {{TaxiMedia: An Interactive Context-Aware Entertainment and Advertising System}},
booktitle = {Proceedings of the Second International Workshop on Pervasive Advertising},
year = {2009},
series = {PerAd'09},
address = {L\"{u}beck, Germany},
month = {oct},
note = {alt2009perad2},
abstract = {The use of public transport vehicles, such as trams, buses, and taxis, as an advertising space has been increasing for several years. However, mainly the outside of the vehicles is used to show advertisements using paintings, foil, or roof-mounted displays. Nowadays, with advances in display technologies, small high-resolution displays can be easily embedded in vehicles and be used for entertainment or advertising purposes. In this paper we introduce an interactive context-aware advertising system designed for cabs, which is targeted to offer context-aware information such as advertisements, points of interest, events, etc. during a cab ride. Additionally, it is possible for advertisers to upload their contents and define areas where their advertisements should be shown.},
owner = {flo},
timestamp = {2009.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2009perad2.pdf},
}
A. S. Shirazi, F. Alt, A. Schmidt, A. Sarjanoja, L. Hynninen, J. Häkkilä, and P. Holleis, “Emotion Sharing Via Self-Composed Melodies on Mobile Phones,” in Proceedings of the 11th International Conference on Human-Computer Interaction with Mobile Devices and Services, New York, NY, USA, 2009, p. 301–304. doi:10.1145/1613858.1613897
[BibTeX] [Abstract] [Download PDF]
In their role as personal communication devices, mobile phones are a natural choice for sharing and communicating emotions. However, their functionalities are currently very limited in power to express affective messages. In this paper, we describe the design of a system that allows users to easily compose melodies and share them via mobile phones. We show that by using these melodies information about the current emotional state of the sender can be expressed and recognized synchronously by the receiver in a simple, quick, and unobtrusive way. Further, we reveal that self-composed melodies have a stronger impact than pre-composed or downloaded messages, similar to crafted pieces of art offered to a beloved person. We then present findings from a user study that assesses the implementation of a functional prototype and the adequacy of the system for emotional communication.
@InProceedings{sahami2009mobilehci,
author = {Shirazi, Alireza Sahami and Alt, Florian and Schmidt, Albrecht and Sarjanoja, Ari-Heikki and Hynninen, Lotta and H\"{a}kkil\"{a}, Jonna and Holleis, Paul},
title = {{Emotion Sharing Via Self-Composed Melodies on Mobile Phones}},
booktitle = {Proceedings of the 11th International Conference on Human-Computer Interaction with Mobile Devices and Services},
year = {2009},
series = {MobileHCI'09},
pages = {301--304},
address = {New York, NY, USA},
publisher = {ACM},
note = {sahami2009mobilehci},
abstract = {In their role as personal communication devices, mobile phones are a natural choice for sharing and communicating emotions. However, their functionalities are currently very limited in power to express affective messages. In this paper, we describe the design of a system that allows users to easily compose melodies and share them via mobile phones. We show that by using these melodies information about the current emotional state of the sender can be expressed and recognized synchronously by the receiver in a simple, quick, and unobtrusive way. Further, we reveal that self-composed melodies have a stronger impact than pre-composed or downloaded messages, similar to crafted pieces of art offered to a beloved person. We then present findings from a user study that assesses the implementation of a functional prototype and the adequacy of the system for emotional communication.},
acmid = {1613897},
articleno = {30},
doi = {10.1145/1613858.1613897},
isbn = {978-1-60558-281-8},
keywords = {composer, emotion sharing, mobile phone, synchronous},
location = {Bonn, Germany},
numpages = {4},
timestamp = {2009.09.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/sahami2009mobilehci.pdf},
}

2008

A. Schmidt, F. Alt, P. Holleis, J. Mueller, and A. Krueger, “Creating Log Files and Click Streams for Advertisements in Physical Space,” in Adjunct Proceedings of the 10th International Conference on Ubiquitous Computing, Seoul, South Korea, 2008, p. 28–29.
[BibTeX] [Abstract] [Download PDF]
Poster advertisement has a long tradition and is transforming rapidly into digital media. In this paper we provide an overview of how sensing can be used to create online and up-to-date information about potential viewers. We assess what application domains can benefit from continuous monitoring of visitors. As measuring with simple sensors is inherently error-prone, we suggest the notion of comparative advertising power, which compares the number of potential viewers in different locations. We address user acceptance and privacy concerns and show technical mechanisms to increase privacy.
@Conference{schmidt2008ubicompadj,
author = {Schmidt, A. and Alt, F. and Holleis, P. and Mueller, J. and Krueger, A.},
title = {{Creating Log Files and Click Streams for Advertisements in Physical Space}},
booktitle = {Adjunct Proceedings of the 10th International Conference on Ubiquitous Computing},
year = {2008},
series = {Ubicomp'08},
pages = {28--29},
address = {Seoul, South Korea},
note = {schmidt2008ubicompadj},
abstract = {Poster advertisement has a long tradition and is transforming rapidly into digital media. In this paper we provide an overview of how sensing can be used to create online and up-to-date information about potential viewers. We assess what application domains can benefit from continuous monitoring of visitors. As measuring with simple sensors is inherently error-prone, we suggest the notion of comparative advertising power, which compares the number of potential viewers in different locations. We address user acceptance and privacy concerns and show technical mechanisms to increase privacy.},
timestamp = {2008.10.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schmidt2008ubicompadj.pdf},
}

2007

F. Alt, A. Sahami Shirazi, and A. Schmidt, “Monitoring Heartbeat per Day to Motivate Increasing Physical Activity,” in Proceedings of the Ubicomp Workshop on Interaction with Ubiquitous Wellness and Healthcare Applications, Innsbruck, Austria, 2007.
[BibTeX] [Abstract] [Download PDF]
Physical activity is one of the most basic human functions; it is essential for physical and mental health and has major beneficial effects on chronic diseases such as heart disease, stroke, etc. Raising awareness of this condition has been one of the focuses of research in recent years. One common way of creating this awareness is monitoring the number of steps taken by a person and comparing it with the minimum number of steps s/he needs. In this paper we suggest that, instead, the heartbeat can be monitored to raise awareness of physical activity.
@InProceedings{alt2008ubicompadj,
author = {Alt, Florian AND Sahami Shirazi, Alireza AND Schmidt, Albrecht},
title = {{Monitoring Heartbeat per Day to Motivate Increasing Physical Activity}},
booktitle = {Proceedings of the Ubicomp Workshop on Interaction with Ubiquitous Wellness and Healthcare Applications},
year = {2007},
series = {UbiWell'07},
address = {Innsbruck, Austria},
note = {alt2008ubicompadj},
abstract = {Physical activity is one of the most basic human functions; it is essential for physical and mental health and has major beneficial effects on chronic diseases such as heart disease, stroke, etc. Raising awareness of this condition has been one of the focuses of research in recent years. One common way of creating this awareness is monitoring the number of steps taken by a person and comparing it with the minimum number of steps s/he needs. In this paper we suggest that, instead, the heartbeat can be monitored to raise awareness of physical activity.},
owner = {flo},
timestamp = {2007.03.01},
url = {http://www.florian-alt.org/unibw/wp-content/publications/alt2008ubicompadj.pdf},
}

2006

A. Schmidt, F. Alt, D. Wilhelm, J. Niggemann, and H. Feussner, “Experimenting with Ubiquitous Computing Technologies in Productive Environments,” Elektrotechnik und Informationstechnik, vol. 123, iss. 4, pp. 135-139, 2006.
[BibTeX] [Abstract] [Download PDF]
Ubiquitous computing techniques are ideal tools to bring new solutions to environments which are otherwise quite resistant to rapid change. In this paper we present techniques to carry out experiments in the very heterogeneous environment of a hospital’s decision-making conference, the “tumour board”. Introducing the concept of surface interaction we demonstrate how information from various sources such as X-ray film, slide presentations and projections of CT scans together with oral comments and typed notes can be captured and made available for surgeons’ use in the operating theatre, without interfering with the “old” way of holding the meeting and without putting any extra burden on the hospital staff.
@Article{schmidt2006elektrotechnik,
author = {Albrecht Schmidt and Florian Alt and Dirk Wilhelm and J{\"o}rg Niggemann and Hubertus Feussner},
title = {{Experimenting with Ubiquitous Computing Technologies in Productive Environments}},
journal = {Elektrotechnik und Informationstechnik},
year = {2006},
volume = {123},
number = {4},
pages = {135-139},
note = {schmidt2006elektrotechnik},
abstract = {Ubiquitous computing techniques are ideal tools to bring new solutions to environments which are otherwise quite resistant to rapid change. In this paper we present techniques to carry out experiments in the very heterogeneous environment of a hospital’s decision-making conference, the “tumour board”. Introducing the concept of surface interaction we demonstrate how information from various sources such as X-ray film, slide presentations and projections of CT scans together with oral comments and typed notes can be captured and made available for surgeons’ use in the operating theatre, without interfering with the “old” way of holding the meeting and without putting any extra burden on the hospital staff.},
timestamp = {2006.10.19},
url = {http://www.florian-alt.org/unibw/wp-content/publications/schmidt2006elektrotechnik.pdf},
}

Graduation Theses

2017

F. Hartmann, Time-Constrained Access Control for Mobile Devices, 2017.
[BibTeX] [Abstract]
In this thesis, a novel concept to unlock smartphones was elaborated. It enables an alternative time-constrained session on the smartphone for short access. Before the concept was developed, existing research about smartphone usage, unlock behaviors, and non-standard unlock methods was explored. The final concept was then implemented as an installable Android application. The prototype application, called SnapApp, combines the two established unlock methods PIN and slide-to-unlock in one lockscreen. The user can decide to either get full access by entering a PIN or to get constrained short access by using slide-to-unlock. A longitudinal field study was conducted by installing the prototype on the smartphones of 18 participants, who tested the new lockscreen for a duration of 30 days. Results revealed that SnapApp was able to reduce PIN prompts by 20% in total, which also saved valuable time. Security was not impaired, as the majority of the users individually configured the maximum session lengths, the expiration of available short sessions, and the blacklists, which contain apps protected from usage during short access. This was also confirmed by the feedback questionnaires of the study. SnapApp can be adapted to different user needs and was thereby equally accepted by PIN, pattern, and swipe users. Besides, the prototype requires no further hardware or sensors and can thus be installed on any Android smartphone.
@Misc{hartmann2017lmu,
author = {Fabian Hartmann},
title = {Time-constrained access control for mobile devices},
howpublished = {LMU M\"{u}nchen},
year = {2017},
note = {hartmann2017lmu},
abstract = {In this thesis, a novel concept to unlock smartphones was elaborated. It enables an alternative time-constrained session on the smartphone for short access. Before the concept was developed, existing research about smartphone usage, unlock behaviors, and non-standard unlock methods was explored. The final concept was then implemented as an installable Android application. The prototype application, called SnapApp, combines the two established unlock methods PIN and slide-to-unlock in one lockscreen. The user can decide to either get full access by entering a PIN or to get constrained short access by using slide-to-unlock. A longitudinal field study was conducted by installing the prototype on the smartphones of 18 participants, who tested the new lockscreen for a duration of 30 days. Results revealed that SnapApp was able to reduce PIN prompts by 20% in total, which also saved valuable time. Security was not impaired, as the majority of the users individually configured the maximum session lengths, the expiration of available short sessions, and the blacklists, which contain apps protected from usage during short access. This was also confirmed by the feedback questionnaires of the study. SnapApp can be adapted to different user needs and was thereby equally accepted by PIN, pattern, and swipe users. Besides, the prototype requires no further hardware or sensors and can thus be installed on any Android smartphone.},
owner = {florian},
school = {LMU M\"{u}nchen},
timestamp = {2019.04.23},
}