Kim, Yonghyun; Park, Junhyung; Bae, Joonhyung; Kim, Kirak; Kwon, Taegyun; Lerch, Alexander; Nam, Juhan PianoVAM: A Multimodal Piano Performance Dataset Proceedings Article In: Proceedings of the International Society for Music Information Retrieval Conference (ISMIR), Daejeon, South Korea, 2025. Abstract | Links | BibTeX | Tags: Computer Science - Artificial Intelligence, Computer Science - Computer Vision and Pattern Recognition, Computer Science - Multimedia, Computer Science - Sound, Electrical Engineering and Systems Science - Audio and Speech Processing Park, Junhyung; Kim, Yonghyun; Bae, Joonhyung; Kim, Kirak; Kwon, Taegyun; Lerch, Alexander; Nam, Juhan Two Web Toolkits for Multimodal Piano Performance Dataset Acquisition and Fingering Annotation Proceedings Article In: Late Breaking Demo (Extended Abstract), Proceedings of the International Society for Music Information Retrieval Conference (ISMIR), Daejeon, South Korea, 2025. Abstract | Links | BibTeX | Tags: Computer Science - Computer Vision and Pattern Recognition, Computer Science - Multimedia, Computer Science - Sound, Electrical Engineering and Systems Science - Audio and Speech Processing, Electrical Engineering and Systems Science - Image and Video Processing
2025
@inproceedings{kim_pianovam_2025,
title = {{PianoVAM}: A Multimodal Piano Performance Dataset},
author = {Kim, Yonghyun and Park, Junhyung and Bae, Joonhyung and Kim, Kirak and Kwon, Taegyun and Lerch, Alexander and Nam, Juhan},
url = {http://arxiv.org/abs/2509.08800},
doi = {10.48550/arXiv.2509.08800},
eprint = {2509.08800},
eprinttype = {arXiv},
year = {2025},
date = {2025-09-01},
urldate = {2025-09-19},
booktitle = {Proceedings of the International Society for Music Information Retrieval Conference (ISMIR)},
address = {Daejeon, South Korea},
abstract = {The multimodal nature of music performance has driven increasing interest in data beyond the audio domain within the music information retrieval (MIR) community. This paper introduces PianoVAM, a comprehensive piano performance dataset that includes videos, audio, MIDI, hand landmarks, fingering labels, and rich metadata. The dataset was recorded using a Disklavier piano, capturing audio and MIDI from amateur pianists during their daily practice sessions, alongside synchronized top-view videos in realistic and varied performance conditions. Hand landmarks and fingering labels were extracted using a pretrained hand pose estimation model and a semi-automated fingering annotation algorithm. We discuss the challenges encountered during data collection and the alignment process across different modalities. Additionally, we describe our fingering annotation method based on hand landmarks extracted from videos. Finally, we present benchmarking results for both audio-only and audio-visual piano transcription using the PianoVAM dataset and discuss additional potential applications.},
keywords = {Computer Science - Artificial Intelligence, Computer Science - Computer Vision and Pattern Recognition, Computer Science - Multimedia, Computer Science - Sound, Electrical Engineering and Systems Science - Audio and Speech Processing},
pubstate = {published},
tppubtype = {inproceedings}
}
@inproceedings{park_two_2025,
title = {Two Web Toolkits for Multimodal Piano Performance Dataset Acquisition and Fingering Annotation},
author = {Park, Junhyung and Kim, Yonghyun and Bae, Joonhyung and Kim, Kirak and Kwon, Taegyun and Lerch, Alexander and Nam, Juhan},
url = {http://arxiv.org/abs/2509.15222},
doi = {10.48550/arXiv.2509.15222},
eprint = {2509.15222},
eprinttype = {arXiv},
year = {2025},
date = {2025-09-01},
urldate = {2025-09-20},
booktitle = {Late Breaking Demo (Extended Abstract), Proceedings of the International Society for Music Information Retrieval Conference (ISMIR)},
address = {Daejeon, South Korea},
abstract = {Piano performance is a multimodal activity that intrinsically combines physical actions with the acoustic rendition. Despite growing research interest in analyzing the multimodal nature of piano performance, the laborious process of acquiring large-scale multimodal data remains a significant bottleneck, hindering further progress in this field. To overcome this barrier, we present an integrated web toolkit comprising two graphical user interfaces (GUIs): (i) PiaRec, which supports the synchronized acquisition of audio, video, MIDI, and performance metadata. (ii) ASDF, which enables the efficient annotation of performer fingering from the visual data. Collectively, this system can streamline the acquisition of multimodal piano performance datasets.},
keywords = {Computer Science - Computer Vision and Pattern Recognition, Computer Science - Multimedia, Computer Science - Sound, Electrical Engineering and Systems Science - Audio and Speech Processing, Electrical Engineering and Systems Science - Image and Video Processing},
pubstate = {published},
tppubtype = {inproceedings}
}
publications
PianoVAM: A Multimodal Piano Performance Dataset Proceedings Article In: Proceedings of the International Society for Music Information Retrieval Conference (ISMIR), Daejeon, South Korea, 2025. Two Web Toolkits for Multimodal Piano Performance Dataset Acquisition and Fingering Annotation Proceedings Article In: Late Breaking Demo (Extended Abstract), Proceedings of the International Society for Music Information Retrieval Conference (ISMIR), Daejeon, South Korea, 2025.
2025