skip to main content

Sketch Recognition Lab
Director: Dr. Tracy Anne Hammond

SRL Doctoral Consortiums


 


Doctoral Consortiums


PublicationImage 2018 Josh Cherian. 2018. "Automatic Recognition of Hygiene Activities and Personalized Interventions for Chronic Care." IUI'18 23rd International Conference on Intelligent User Interfaces Tokyo, Japan. Doctoral Consortium. Tokyo, Japan: IUI, March 2018. https://dl.acm.org/citation.cfm?id=3173153
Show Abstract:


Show BibTex

@inproceedings{joshcherian2018AutomaticRecognitionofHygieneActivitiesandPersonalizedInterventionsforChronicCareDoctoralConsortiums,
  author    = {Cherian, Josh},
  booktitle = {IUI'18 23rd International Conference on Intelligent User Interfaces Tokyo, Japan. Doctoral Consortium},
  title     = {Automatic Recognition of Hygiene Activities and Personalized Interventions for Chronic Care},
  year      = {2018},
  month     = mar,
  publisher = {IUI},
  address   = {Tokyo, Japan},
  url       = {https://dl.acm.org/citation.cfm?id=3173153},
}
PublicationImage
 
PublicationImage 2017 Blake Williford. 2017. "SketchTivity: Improving Creativity by Learning Sketching with an Intelligent Tutoring System." C&C '17 Proceedings of the 2017 ACM SIGCHI Conference on Creativity and Cognition. Singapore, Singapore: ACM SIGCHI, June 27, 2017. pp. 477-483. Advisor: Tracy Hammond. ISBN: 978-1-4503-4403-6. 10.1145/3059454.3078695 http://doi.acm.org/10.1145/3059454.3078695 https://dl.acm.org/citation.cfm?id=3078695
Show Abstract:

Sketching is a skill closely tied to creativity and a powerful tool for exploring and expressing ideas and becoming a more well-rounded communicator. Conventional sketching instruction employs traditional mediums such as pen and paper in studio environments to convey these fundamentals to students. However, this approach limits the bandwidth and capability of instructors to give timely and individualized feedback, and students often struggle with low self-efficacy and motivation. An intelligent tutoring system can leverage sketching pedagogy to give students personalized feedback outside of classroom hours, which can potentially improve self-efficacy, motivation, and creativity in the students. Additionally, the inclusion of games and gamified lessons can offer a more engaging and motivating experience for them. This research is aimed at developing intelligent interactive lessons, challenges, and games that teach sketching fundamentals while evaluating their effectiveness in terms of improved sketching ability in the students, increased self-efficacy with respect to sketching ability and creativity, motivation to practice sketching, and idea generation capability. Ongoing work will be focused on developing creative challenges and measuring effects of the system on creative self-efficacy and idea generation in its users.

Show BibTex

@inproceedings{blakewilliford2017SketchTivityImprovingCreativitybyLearningSketchingwithanIntelligentTutoringSystemDoctoralConsortiums,
  author    = {Williford, Blake},
  booktitle = {C\&C '17 Proceedings of the 2017 ACM SIGCHI Conference on Creativity and Cognition},
  title     = {{SketchTivity}: Improving Creativity by Learning Sketching with an Intelligent Tutoring System},
  pages     = {477--483},
  year      = {2017},
  month     = jun,
  publisher = {ACM SIGCHI},
  address   = {Singapore, Singapore},
  isbn      = {978-1-4503-4403-6},
  doi       = {10.1145/3059454.3078695},
  url       = {https://dl.acm.org/citation.cfm?id=3078695},
  note      = {Advisor: Tracy Hammond},
}
PublicationImage
 
PublicationImage 2016 Vijay Rajanna. 2016. "Gaze Typing Through Foot-Operated Wearable Device." the 18th International ACM SIGACCESS Conference on Computers and Accessibility (ASSETS '16). Reno, NV, USA: ACM SIGACCESS, October 24-26, 2016. pp. 345-346. Advisor: Tracy Hammond. ISBN: 978-1-4503-4124-0. 10.1145/2982142.2982145 http://doi.acm.org/10.1145/2982142.2982145 http://dl.acm.org/citation.cfm?doid=2982142.2982145
Show Abstract:

Gaze Typing, a gaze-assisted text entry method, allows individuals with motor (arm, spine) impairments to enter text on a computer using a virtual keyboard and their gaze. Though gaze typing is widely accepted, this method is limited by its lower typing speed, higher error rate, and the resulting visual fatigue, since dwell-based key selection is used. In this research, we present a gaze-assisted, wearable- supplemented, foot interaction framework for dwell-free gaze typing. The framework consists of a custom-built virtual keyboard, an eye tracker, and a wearable device attached to the user’s foot. To enter a character, the user looks at the character and selects it by pressing the pressure pad, attached to the wearable device, with the foot. Results from a preliminary user study involving two participants with motor impairments show that the participants achieved a mean gaze typing speed of 6.23 Words Per Minute (WPM). In addition, the mean value of Key Strokes Per Character (KPSC) was 1.07 (ideal 1.0), and the mean value of Rate of Backspace Activation (RBA) was 0.07 (ideal 0.0). Furthermore, we present our findings from multiple usability studies and design iterations, through which we created appropriate affordances and experience design of our gaze typing system.

Show BibTex

@inproceedings{vijayrajanna2016GazeTypingThroughFootOperatedWearableDeviceDoctoralConsortiums,
  author    = {Rajanna, Vijay},
  booktitle = {the 18th International ACM SIGACCESS Conference on Computers and Accessibility (ASSETS '16)},
  title     = {Gaze Typing Through Foot-Operated Wearable Device},
  pages     = {345--346},
  year      = {2016},
  month     = oct,
  publisher = {ACM SIGACCESS},
  address   = {Reno, NV, USA},
  isbn      = {978-1-4503-4124-0},
  doi       = {10.1145/2982142.2982145},
  url       = {http://dl.acm.org/citation.cfm?doid=2982142.2982145},
  note      = {Advisor: Tracy Hammond},
}
PublicationImage
 
PublicationImage 2016 Vijay Rajanna. 2016. "Gaze and Foot Input: Toward a Rich and Assistive Interaction Modality." Companion Publication of the 21st International Conference on Intelligent User Interfaces. IUI '16 Companion. Sonoma, California, USA: ACM, March 7-10, 2016. pp. 126-129. Advisor: Tracy Hammond. ISBN: 978-1-4503-4140-0. 10.1145/2876456.2876462 http://doi.acm.org/10.1145/2876456.2876462 http://dl.acm.org/citation.cfm?id=2876462&CFID=590130928&CFTOKEN=51402862
Show Abstract:

Transforming gaze input into a rich and assistive interaction modality is one of the primary interests in eye tracking research. Gaze input in conjunction with traditional solutions to the "Midas Touch" problem, dwell time or a blink, is not matured enough to be widely adopted. In this regard, we present our preliminary work, a framework that achieves precise "point and click" interactions in a desktop environment through combining the gaze and foot interaction modalities. The framework comprises of an eye tracker and a foot-operated quasi-mouse that is wearable. The system evaluation shows that our gaze and foot interaction framework performs as good as a mouse (time and precision) in the majority of tasks. Furthermore, this dissertation work focuses on the goal of realizing gaze-assisted interaction as a primary interaction modality to substitute conventional mouse and keyboard-based interaction methods. In addition, we consider some of the challenges that need to be addressed, and also present the possible solutions toward achieving our goal.

Show BibTex

@inproceedings{vijayrajanna2016GazeandFootInputTowardaRichandAssistiveInteractionModalityDoctoralConsortiums,
  author    = {Rajanna, Vijay},
  booktitle = {Companion Publication of the 21st International Conference on Intelligent User Interfaces},
  title     = {Gaze and Foot Input: Toward a Rich and Assistive Interaction Modality},
  pages     = {126--129},
  year      = {2016},
  month     = mar,
  publisher = {ACM},
  address   = {Sonoma, California, USA},
  series    = {IUI '16 Companion},
  isbn      = {978-1-4503-4140-0},
  doi       = {10.1145/2876456.2876462},
  url       = {http://dl.acm.org/citation.cfm?id=2876462},
  note      = {Advisor: Tracy Hammond},
}
PublicationImage
 
PublicationImage 2016 Vijay Rajanna and Tracy Hammond. 2016. "Gaze and Foot Input: Toward a Rich and Assistive Interaction Modality." Proceedings of the 21st International Conference on Intelligent User Interfaces (IUI '16). Doctoral Consortium. Sonoma, California, USA: ACM, March 7-10, 2016. http://dl.acm.org/citation.cfm?id=2876462&CFID=590130928&CFTOKEN=51402862
Show Abstract:

@inproceedings{RajannaIUI2016, author = {Rajanna, Vijay Dandur}, title = {Gaze and Foot Input: Toward a Rich and Assistive Interaction Modality}, booktitle = {Companion Publication of the 21st International Conference on Intelligent User Interfaces}, series = {IUI '16 Companion}, year = {2016}, isbn = {978-1-4503-4140-0}, location = {Sonoma, California, USA}, pages = {126--129}, numpages = {4}, url = {http://doi.acm.org/10.1145/2876456.2876462}, doi = {10.1145/2876456.2876462}, acmid = {2876462}, publisher = {ACM}, address = {New York, NY, USA}, keywords = {authentication, eye tracking, foot input, gaze and foot interaction, tabletop interaction}, }

Show BibTex

@inproceedings{vijayrajannatracyhammond2016GazeandFootInputTowardaRichandAssistiveInteractionModalityDoctoralConsortiums,
  author    = {Rajanna, Vijay and Hammond, Tracy},
  booktitle = {Proceedings of the 21st International Conference on Intelligent User Interfaces (IUI '16). Doctoral Consortium},
  title     = {Gaze and Foot Input: Toward a Rich and Assistive Interaction Modality},
  year      = {2016},
  month     = mar,
  publisher = {ACM},
  address   = {Sonoma, California, USA},
  url       = {http://dl.acm.org/citation.cfm?id=2876462},
}
PublicationImagePublicationImage
 
PublicationImage 2014 Paul Taele and Tracy Hammond. 2014. "Developing Sketch Recognition and Interaction Techniques for Intelligent Surfaceless Sketching User Interfaces." Proceedings of the Companion Publication of the 19th International Conference on Intelligent User Interfaces (IUI) Doctoral Consortium. Haifa, Israel: ACM, February 24, 2014. pp. 53-55. ISBN: 978-1-4503-2729-9.
Show Abstract:

@inproceedings{TaeleIUI2014, author = {Taele, Paul and Hammond, Tracy}, title = {Developing Sketch Recognition and Interaction Techniques for Intelligent Surfaceless Sketching User Interfaces}, booktitle = {Proceedings of the Companion Publication of the 19th International Conference on Intelligent User Interfaces}, series = {IUI Companion '14}, year = {2014}, isbn = {978-1-4503-2729-9}, location = {Haifa, Israel}, pages = {53--56}, numpages = {4}, url = {http://doi.acm.org/10.1145/2559184.2559185}, doi = {10.1145/2559184.2559185}, acmid = {2559185}, publisher = {ACM}, address = {New York, NY, USA}, keywords = {natural user interfaces, sketch recognition, surfaceless interaction} }

Show BibTex

@inproceedings{paultaeletracyhammond2014DevelopingSketchRecognitionandInteractionTechniquesforIntelligentSurfacelessSketchingUserInterfacesDoctoralConsortiums,
  author    = {Taele, Paul and Hammond, Tracy},
  booktitle = {Proceedings of the Companion Publication of the 19th International Conference on Intelligent User Interfaces (IUI) Doctoral Consortium},
  title     = {Developing Sketch Recognition and Interaction Techniques for Intelligent Surfaceless Sketching User Interfaces},
  pages     = {53--56},
  year      = {2014},
  month     = feb,
  publisher = {ACM},
  address   = {Haifa, Israel},
  isbn      = {978-1-4503-2729-9},
  doi       = {10.1145/2559184.2559185},
  url       = {http://doi.acm.org/10.1145/2559184.2559185},
}
PublicationImagePublicationImage
 





Show All BibTex


@inproceedings{joshcherian2018AutomaticRecognitionofHygieneActivitiesandPersonalizedInterventionsforChronicCareDoctoralConsortiums,
  author    = {Cherian, Josh},
  booktitle = {IUI'18 23rd International Conference on Intelligent User Interfaces Tokyo, Japan. Doctoral Consortium},
  title     = {Automatic Recognition of Hygiene Activities and Personalized Interventions for Chronic Care},
  year      = {2018},
  month     = mar,
  publisher = {IUI},
  address   = {Tokyo, Japan},
  url       = {https://dl.acm.org/citation.cfm?id=3173153},
}


@inproceedings{blakewilliford2017SketchTivityImprovingCreativitybyLearningSketchingwithanIntelligentTutoringSystemDoctoralConsortiums,
  author    = {Williford, Blake},
  booktitle = {C\&C '17 Proceedings of the 2017 ACM SIGCHI Conference on Creativity and Cognition},
  title     = {{SketchTivity}: Improving Creativity by Learning Sketching with an Intelligent Tutoring System},
  pages     = {477--483},
  year      = {2017},
  month     = jun,
  publisher = {ACM SIGCHI},
  address   = {Singapore, Singapore},
  isbn      = {978-1-4503-4403-6},
  doi       = {10.1145/3059454.3078695},
  url       = {https://dl.acm.org/citation.cfm?id=3078695},
  note      = {Advisor: Tracy Hammond},
}


@inproceedings{vijayrajanna2016GazeTypingThroughFootOperatedWearableDeviceDoctoralConsortiums,
  author    = {Rajanna, Vijay},
  booktitle = {the 18th International ACM SIGACCESS Conference on Computers and Accessibility (ASSETS '16)},
  title     = {Gaze Typing Through Foot-Operated Wearable Device},
  pages     = {345--346},
  year      = {2016},
  month     = oct,
  publisher = {ACM SIGACCESS},
  address   = {Reno, NV, USA},
  isbn      = {978-1-4503-4124-0},
  doi       = {10.1145/2982142.2982145},
  url       = {http://dl.acm.org/citation.cfm?doid=2982142.2982145},
  note      = {Advisor: Tracy Hammond},
}


@inproceedings{vijayrajanna2016GazeandFootInputTowardaRichandAssistiveInteractionModalityDoctoralConsortiums,
  author    = {Rajanna, Vijay},
  booktitle = {Companion Publication of the 21st International Conference on Intelligent User Interfaces},
  title     = {Gaze and Foot Input: Toward a Rich and Assistive Interaction Modality},
  pages     = {126--129},
  year      = {2016},
  month     = mar,
  publisher = {ACM},
  address   = {Sonoma, California, USA},
  series    = {IUI '16 Companion},
  isbn      = {978-1-4503-4140-0},
  doi       = {10.1145/2876456.2876462},
  url       = {http://dl.acm.org/citation.cfm?id=2876462},
  note      = {Advisor: Tracy Hammond},
}


@inproceedings{vijayrajannatracyhammond2016GazeandFootInputTowardaRichandAssistiveInteractionModalityDoctoralConsortiums,
  author    = {Rajanna, Vijay and Hammond, Tracy},
  booktitle = {Proceedings of the 21st International Conference on Intelligent User Interfaces (IUI '16). Doctoral Consortium},
  title     = {Gaze and Foot Input: Toward a Rich and Assistive Interaction Modality},
  year      = {2016},
  month     = mar,
  publisher = {ACM},
  address   = {Sonoma, California, USA},
  url       = {http://dl.acm.org/citation.cfm?id=2876462},
}


@inproceedings{paultaeletracyhammond2014DevelopingSketchRecognitionandInteractionTechniquesforIntelligentSurfacelessSketchingUserInterfacesDoctoralConsortiums,
  author    = {Taele, Paul and Hammond, Tracy},
  booktitle = {Proceedings of the Companion Publication of the 19th International Conference on Intelligent User Interfaces (IUI) Doctoral Consortium},
  title     = {Developing Sketch Recognition and Interaction Techniques for Intelligent Surfaceless Sketching User Interfaces},
  pages     = {53--56},
  year      = {2014},
  month     = feb,
  publisher = {ACM},
  address   = {Haifa, Israel},
  isbn      = {978-1-4503-2729-9},
  doi       = {10.1145/2559184.2559185},
  url       = {http://doi.acm.org/10.1145/2559184.2559185},
}


Show All Latex Include

\subsection{Doctoral Consortiums}

\subsubsection{Doctoral Consortiums}
\begin{enumerate}
\item \bibentry{joshcherian2018AutomaticRecognitionofHygieneActivitiesandPersonalizedInterventionsforChronicCareDoctoralConsortiums}
\item \bibentry{blakewilliford2017SketchTivityImprovingCreativitybyLearningSketchingwithanIntelligentTutoringSystemDoctoralConsortiums}
\item \bibentry{vijayrajanna2016GazeTypingThroughFootOperatedWearableDeviceDoctoralConsortiums}
\item \bibentry{vijayrajanna2016GazeandFootInputTowardaRichandAssistiveInteractionModalityDoctoralConsortiums}
\item \bibentry{vijayrajannatracyhammond2016GazeandFootInputTowardaRichandAssistiveInteractionModalityDoctoralConsortiums}
\item \bibentry{paultaeletracyhammond2014DevelopingSketchRecognitionandInteractionTechniquesforIntelligentSurfacelessSketchingUserInterfacesDoctoralConsortiums}
\end{enumerate}