Xiao XIAO is Principal Investigator of the Human Learning Group at DVIC. She is interested in creating new digital tools to help humans learn diverse skills across domains and stages of life. Xiao completed her PhD in the Tangible Media Group at the MIT Media Lab.
Paul-Peter Arslan; Xiao Xiao; Maxime Térémetz; Pavel Lindberg
Motor Control of the Hand and Speech Synthesis Through a Rhythmic Game Conference
Progress in Motor Control XIV, International Society of Motor Control Rome, Italy, 2023.
@conference{arslan_2484,
title = {Motor Control of the Hand and Speech Synthesis Through a Rhythmic Game},
author = {Paul-Peter Arslan and Xiao Xiao and Maxime Térémetz and Pavel Lindberg},
url = {n/a},
year = {2023},
date = {2023-09-01},
booktitle = {Progress in Motor Control XIV},
address = {Rome, Italy},
organization = {International Society of Motor Control},
abstract = {Motor control of the hand requires a high level of dexterity. Temporal processing in hand movement and in speech may be intrinsically linked, with each modality informing and complementing the other. This work introduces a finger-tapping musical game in which hand movements and rhythmic tapping control song synthesis.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
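The abstract above describes controlling song synthesis through finger-tapping rhythm but does not publish an implementation. As a purely illustrative sketch of one building block such a game might need, the Python below estimates tempo from tap timestamps; the class and parameter names are hypothetical, not taken from the paper.

import time
from statistics import median

class TapTempoTracker:
    """Estimate tempo in BPM from a stream of finger-tap timestamps."""
    def __init__(self, window=8):
        self.window = window        # number of recent taps to keep
        self.taps = []              # tap timestamps in seconds

    def register_tap(self, t=None):
        """Record a tap and return the current tempo estimate, or None."""
        self.taps.append(time.monotonic() if t is None else t)
        self.taps = self.taps[-self.window:]
        if len(self.taps) < 2:
            return None
        # The median inter-tap interval is robust to one mistimed tap.
        intervals = [b - a for a, b in zip(self.taps, self.taps[1:])]
        return 60.0 / median(intervals)

# Taps every 0.5 s give 120 BPM, which could drive the synthesis tempo.
tracker = TapTempoTracker()
for i in range(5):
    bpm = tracker.register_tap(t=i * 0.5)
print(bpm)  # 120.0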
Adrien Lefevre; Xiao Xiao; Clément Duhart
The Interactive Musical Score: An Electronic Score To Hear The Notes From A Paper Score Conference
New Interfaces for Musical Expression, Aotearoa, New Zealand, 2022.
@conference{lefevre_1830,
title = {The Interactive Musical Score: An Electronic Score To Hear The Notes From A Paper Score},
author = {Adrien Lefevre and Xiao Xiao and Clément Duhart},
url = {https://nime2022.org/program.html},
year = {2022},
date = {2022-06-01},
booktitle = {New Interfaces for Musical Expression},
address = {Aotearoa, New Zealand},
abstract = {The Interactive Score is a novel instrumental device for children's solfege learning. Paper scores are overlaid onto a staff drawn with conductive ink and connected to an Adafruit musical box. Pressing a note in the score triggers its sound, and running fingers over the notes plays a melody.
Learning to read music from the score is an essential part of Western classical music training. Traditionally, children learn the different music notes by singing or playing notes on an instrument, guided by a teacher. We envision a way for children to learn the correspondence between notation and sound by directly touching the score.
The Interactive Score is effortless to use and allows children to make discoveries on their own. The correspondence between the visual, the tactile, and the sound can aid in learning.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
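As a minimal sketch of the mapping the abstract describes, from a touched position on the printed staff to an audible note, the Python below generates a tone per staff position with numpy. The scale, note duration, and function names are assumptions for illustration; the actual device uses conductive ink and an Adafruit sound module, whose firmware is not reproduced here.

import numpy as np

# C major scale starting at middle C; the index is the touched staff position.
SCALE_MIDI = [60, 62, 64, 65, 67, 69, 71, 72]

def midi_to_hz(midi_note):
    return 440.0 * 2 ** ((midi_note - 69) / 12)

def note_waveform(staff_index, duration=0.4, sr=44100):
    """Return a sine tone for the touched staff position."""
    freq = midi_to_hz(SCALE_MIDI[staff_index])
    t = np.linspace(0, duration, int(sr * duration), endpoint=False)
    tone = 0.3 * np.sin(2 * np.pi * freq * t)
    # A short fade-in/out avoids clicks when notes are triggered in sequence.
    fade = np.linspace(0, 1, int(0.01 * sr))
    tone[:fade.size] *= fade
    tone[-fade.size:] *= fade[::-1]
    return tone

# Running a finger across positions 0..7 would play an ascending scale.
melody = np.concatenate([note_waveform(i) for i in range(8)])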
Xiao Xiao; Sarah Fdili Alaoui
Tuning in to Intangibility: Reflections from my first 3 years of theremin learning Proceedings Article
In: Designing Interactive Systems, pp. 2649-2659, Association for Computing Machinery, New York, NY, United States, Copenhagen, Denmark, 2024, ISBN: 979-8-4007-0583-0.
@inproceedings{xiao_2942,
title = {Tuning in to Intangibility: Reflections from my first 3 years of theremin learning},
author = {Xiao Xiao and Sarah Fdili Alaoui},
url = {https://dl.acm.org/doi/abs/10.1145/3643834.3661584},
isbn = {979-8-4007-0583-0},
year = {2024},
date = {2024-07-01},
booktitle = {Designing Interactive Systems},
pages = {2649 - 2659},
publisher = {Association for Computing Machinery, New York, NY, United States},
address = {Copenhagen, Denmark},
abstract = {This paper presents an autoethnography of 3 years of learning to play the theremin, an instrument lacking tangible feedback. While the theremin is typically invoked in HCI to emphasize the importance of the tactile modality, we interrogate how I, the player, attained musical proficiency without touch. Through a thematic analysis of 235 journal entries, our study distills my strategies for navigating the instrument as well as my personal transformations along the way. We discover that without touch, accurate and musical playing on the theremin relies on continuous auditory feedback, proprioception, and imaginative processes. We discuss challenges and opportunities for embodied and tangible interaction in light of these findings.},
note = {CORE A in 2023
https://portal.core.edu.au/conf-ranks/422/},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Xiao Xiao; Corinne Bonnet; Haohan Zhang; Nicolas Audibert; Barbara Kühnert; Claire Pillot-Loiseau
Enseignement de l'intonation du français par une synthèse vocale contrôlée par le geste : étude de faisabilité Proceedings Article
In: Les 35ème Journées d'Études sur la Parole (JEP), Toulouse, France, 2024, ISBN: NA.
@inproceedings{xiao_3071,
title = {Enseignement de l'intonation du français par une synthèse vocale contrôlée par le geste : étude de faisabilité},
author = {Xiao Xiao and Corinne Bonnet and Haohan Zhang and Nicolas Audibert and Barbara Kühnert and Claire Pillot-Loiseau},
url = {https://jep-taln2024.sciencesconf.org/},
issn = {NA},
year = {2024},
date = {2024-05-01},
booktitle = {Les 35ème Journées d'Études sur la Parole (JEP)},
address = {Toulouse, France},
abstract = {Can French intonation be taught in the classroom using gesture-controlled vocal synthesis on a tablet? The fundamental frequency and duration of four declarative sentences, four polar questions, and four utterances expressing incredulity (1 to 4 syllables), produced by two Ukrainian beginner learners of French, were compared before and after four weekly training sessions. The learners listened to a reference recording, then viewed the model on the tablet, traced the intonation by hand, listened to the synthesized result, and finally traced and listened to their own contour without a guide. They initially produced declarative sentences with rising intonation, and differentiated declaratives from polar questions after training. The expression of incredulity improved for one learner; the other showed some difficulty mastering the technology. This first case study of gesture-controlled vocal synthesis suggests a promising approach for providing more intonation practice in the classroom.},
keywords = {},
pubstate = {accepted},
tppubtype = {inproceedings}
}
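The study compares learners' fundamental frequency and duration before and after training. The authors' analysis pipeline is not included in this entry; as a hedged illustration of that kind of acoustic comparison, the sketch below extracts an F0 contour with librosa's pYIN implementation and summarizes duration and mean F0 for one utterance. The file names are hypothetical.

import numpy as np
import librosa

def f0_summary(wav_path):
    """Return (duration_s, mean_f0_hz, f0_contour) for one recorded utterance."""
    y, sr = librosa.load(wav_path, sr=None)
    f0, voiced_flag, _ = librosa.pyin(
        y, fmin=librosa.note_to_hz("C2"), fmax=librosa.note_to_hz("C6"), sr=sr)
    duration = len(y) / sr
    mean_f0 = float(np.nanmean(f0[voiced_flag])) if voiced_flag.any() else float("nan")
    return duration, mean_f0, f0

# Compare the same phrase produced before and after the four training sessions.
# pre = f0_summary("learner1_question_pre.wav")
# post = f0_summary("learner1_question_post.wav")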
Xiao Xiao
Harmonic Maps: Interactive Visualization of Triad Spaces Based on Spectral Structures Proceedings Article
In: Proceedings of Tenor 2023, Boston, USA, 2023, ISBN: To be obtained.
@inproceedings{xiao_2117,
title = {Harmonic Maps: Interactive Visualization of Triad Spaces Based on Spectral Structures},
author = {Xiao Xiao},
url = {https://tenorboston23.sites.northeastern.edu/v},
issn = {To be obtained},
year = {2023},
date = {2023-05-01},
booktitle = {Proceedings of Tenor 2023},
address = {Boston, USA},
abstract = {We present Harmonic Maps, a visualization of three-note chord spaces and an interactive application that allows users to explore in real-time the connection between the visualization and its mapped sounds. While typical harmonic analysis is based only on notes or on an audio signal, our analysis takes a hybrid approach by quantifying different types of interactions between the spectra of notes. These quantifications, which we call Harmonic Descriptors, are derived from acoustic or perceptual models. Three such descriptors are defined and mapped: concordance, third order concordance and roughness.
Harmonic analysis based on spectral structures opens new possibilities beyond traditional note-only or signal-only approaches. These descriptors can be applied to a continuum of frequencies, independent of the tuning system and of historical and stylistic constraints. Harmonic Maps based on spectral structures can be especially relevant for studying the relationship between timbre and harmony. Our interactive exploration of harmonic spaces can have applications for analytical, compositional, and educational purposes.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
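The abstract defines Harmonic Descriptors computed from interactions between note spectra, but the paper's formulas are not reproduced in this entry. As one well-known point of reference in the same spirit as the roughness descriptor it mentions, the sketch below computes a Plomp-Levelt-style roughness estimate (using Sethares' parameterization) for a triad modeled as sums of harmonic partials; it is not the paper's own descriptor.

import numpy as np
from itertools import combinations

def pair_roughness(f1, f2, a1, a2):
    """Roughness of two partials (Plomp-Levelt curve, Sethares parameters)."""
    f_lo, f_hi = min(f1, f2), max(f1, f2)
    s = 0.24 / (0.0207 * f_lo + 18.96)   # critical-bandwidth scaling
    d = s * (f_hi - f_lo)
    return a1 * a2 * (np.exp(-3.5 * d) - np.exp(-5.75 * d))

def triad_roughness(fundamentals, n_partials=6):
    """Total roughness of a chord modeled as harmonic spectra with 1/k rolloff."""
    partials = [(k * f0, 1.0 / k)
                for f0 in fundamentals
                for k in range(1, n_partials + 1)]
    return sum(pair_roughness(fa, fb, aa, ab)
               for (fa, aa), (fb, ab) in combinations(partials, 2))

print(triad_roughness([261.63, 329.63, 392.00]))   # C major triad
print(triad_roughness([261.63, 277.18, 293.66]))   # chromatic cluster: rougher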
Xiao Xiao
Performative Vocal Synthesis for Foreign Language Intonation Practice Proceedings Article
In: Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems, Hamburg, Germany, 2023, ISBN: 978-1-4503-9421-5/23/04.
@inproceedings{xiao_2253,
title = {Performative Vocal Synthesis for Foreign Language Intonation Practice},
author = {Xiao Xiao},
url = {https://chi2023.acm.org/},
isbn = {978-1-4503-9421-5/23/04},
year = {2023},
date = {2023-04-01},
booktitle = {Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems},
address = {Hamburg, Germany},
abstract = {Typical foreign language (L2) pronunciation training focuses mainly on individual sounds. Intonation, the pattern of pitch change across words or phrases, is often neglected, despite its key role in word-level intelligibility and in the expression of attitudes and affect. This paper examines hand-controlled real-time vocal synthesis, known as Performative Vocal Synthesis (PVS), as an interaction technique for practicing L2 intonation in computer-aided pronunciation training (CAPT).
We evaluate a tablet-based interface where users gesturally control the pitch of a pre-recorded utterance by drawing curves on the touchscreen. Twenty-four subjects (12 French learners, 12 British controls) imitated English phrases with their voice and with the interface. An acoustic analysis and an expert perceptual evaluation showed that learners' gestural imitations were more accurate than their vocal imitations for the fall-rise intonation pattern that is typically difficult for francophones, suggesting that PVS can help learners produce intonation patterns beyond the capabilities of their natural voice.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
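The interface lets learners redraw the pitch of a pre-recorded utterance by tracing a curve on the touchscreen. As a hypothetical sketch of one step of such a pipeline (not the paper's implementation), the Python below converts a drawn trace of (x, y) touchscreen points into a time-aligned F0 contour on a log-frequency scale, which a vocal resynthesis engine could then apply; the screen size and pitch range are assumed values.

import numpy as np

def trace_to_f0(trace_xy, utterance_dur, screen_h=800,
                f0_lo=75.0, f0_hi=400.0, hop_s=0.01):
    """Map a drawn (x, y) trace to an F0 contour sampled every hop_s seconds.

    x is assumed to run left to right over the utterance duration and
    y from top (highest pitch) to bottom (lowest pitch) of the screen.
    """
    xs, ys = np.asarray(trace_xy, dtype=float).T
    t = (xs - xs.min()) / (xs.max() - xs.min()) * utterance_dur
    pos = 1.0 - np.clip(ys / screen_h, 0.0, 1.0)    # 0 = low pitch, 1 = high
    grid = np.arange(0.0, utterance_dur, hop_s)
    pos_grid = np.interp(grid, t, pos)               # resample to a time grid
    f0 = f0_lo * (f0_hi / f0_lo) ** pos_grid         # log-linear pitch mapping
    return grid, f0

# A rise-fall trace drawn over a 1.2 s utterance.
times, contour = trace_to_f0([(0, 600), (200, 300), (400, 500)], 1.2)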
Rébecca Kleinberger; Nikhil Singh; Xiao Xiao; Akito Van Troyer
Voice at NIME: a Taxonomy of New Interfaces for Vocal Musical Expression Proceedings Article
In: Proceedings of the International Conference on New Interfaces for Musical Expression, Aotearoa, New Zealand, 2022.
@inproceedings{kleinberger_1829,
title = {Voice at NIME: a Taxonomy of New Interfaces for Vocal Musical Expression},
author = {Rébecca Kleinberger and Nikhil Singh and Xiao Xiao and Akito Van Troyer},
url = {https://nime.pubpub.org/pub/180al5zt/},
year = {2022},
date = {2022-06-01},
booktitle = {Proceedings of the International Conference on New Interfaces for Musical Expression},
address = {Aotearoa, New Zealand},
abstract = {We present a systematic review of voice-centered NIME publications from the past two decades. Musical expression has been a key driver of innovation in voice-based technologies, from traditional architectures that amplify singing to cutting-edge research in vocal synthesis. The NIME conference has emerged as a prime venue for innovative vocal interfaces. However, there has not been a systematic analysis of this voice-related work or an effort to characterize its features. Analyzing trends in Vocal NIMEs can help the community better understand common interests, identify uncharted territories, and explore directions for future research. We identified a corpus of 98 papers about Vocal NIMEs from 2001 to 2021, which we analyzed in three ways. First, we automatically extracted latent themes and possible categories using natural language processing. Taking inspiration from concepts surfaced through this process, we then defined several core dimensions with associated descriptors of Vocal NIMEs and assigned each paper relevant descriptors under each dimension. Finally, we defined a classification system, which we then used to uniquely and more precisely situate each paper on a map, taking into account the overall goals of each work. Based on our analyses, we present trends and challenges, including questions of gender and diversity in our community, and reflect on opportunities for future work.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
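The review describes automatically extracting latent themes from the corpus of 98 Vocal NIME papers before the manual dimension coding. The authors' exact NLP pipeline is not stated in this entry; as a hedged illustration of that first step, the sketch below runs a standard TF-IDF plus NMF topic model with scikit-learn over a list of paper texts.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF

def extract_themes(documents, n_topics=6, n_top_words=8):
    """Return the top words of each latent theme found in the corpus."""
    vectorizer = TfidfVectorizer(stop_words="english", max_df=0.9, min_df=2)
    X = vectorizer.fit_transform(documents)
    model = NMF(n_components=n_topics, init="nndsvd", random_state=0)
    model.fit(X)
    vocab = vectorizer.get_feature_names_out()
    return [[vocab[i] for i in topic.argsort()[::-1][:n_top_words]]
            for topic in model.components_]

# documents would hold the text of the 98 Vocal NIME papers; any corpus works.
# for theme in extract_themes(documents):
#     print(", ".join(theme))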