Watcharasupat, Karn N.; Ding, Yiwei; Ma, T. Aleksandra; Seshadri, Pavan; Lerch, Alexander Uncertainty Estimation in the Real World: A Study on Music Emotion Recognition Proceedings Article In: Proceedings of the European Conference on Information Retrieval (ECIR), arXiv, Lucca, Italy, 2025. Abstract | Links | BibTeX | Tags: Computer Science - Artificial Intelligence, Computer Science - Information Retrieval, Computer Science - Machine Learning, Computer Science - Sound, Electrical Engineering and Systems Science - Audio and Speech Processing Kim, Yonghyun; Lerch, Alexander Towards Robust Transcription: Exploring Noise Injection Strategies for Training Data Augmentation Proceedings Article In: Late Breaking Demo (Extended Abstract), Proceedings of the International Society for Music Information Retrieval Conference (ISMIR), arXiv, San Francisco, 2024. Abstract | Links | BibTeX | Tags: Computer Science - Artificial Intelligence, Computer Science - Information Retrieval, Computer Science - Machine Learning, Computer Science - Sound, Electrical Engineering and Systems Science - Audio and Speech Processing Ma, T. Aleksandra; Lerch, Alexander Music auto-tagging in the long tail: A few-shot approach Proceedings Article In: Proceedings of the AES Convention, New York, 2024. Abstract | Links | BibTeX | Tags: Computer Science - Information Retrieval, Computer Science - Machine Learning, Computer Science - Sound, Electrical Engineering and Systems Science - Audio and Speech Processing, H.3.3 Watcharasupat, Karn N.; Lerch, Alexander A Stem-Agnostic Single-Decoder System for Music Source Separation Beyond Four Stems Proceedings Article In: Proceedings of the International Society for Music Information Retrieval Conference (ISMIR), San Francisco, 2024. 
Abstract | Links | BibTeX | Tags: Computer Science - Artificial Intelligence, Computer Science - Information Retrieval, Computer Science - Machine Learning, Computer Science - Sound, Electrical Engineering and Systems Science - Audio and Speech Processing Ding, Yiwei; Lerch, Alexander Audio Embeddings as Teachers for Music Classification Proceedings Article In: Proceedings of the International Society for Music Information Retrieval Conference (ISMIR), Milan, Italy, 2023. Abstract | Links | BibTeX | Tags: Computer Science - Information Retrieval, Computer Science - Sound, Electrical Engineering and Systems Science - Audio and Speech Processing Watcharasupat, Karn N; Lerch, Alexander Evaluation of Latent Space Disentanglement in the Presence of Interdependent Attributes Proceedings Article In: Late Breaking Demo (Extended Abstract), Proceedings of the International Society for Music Information Retrieval Conference (ISMIR), Online, 2021. Abstract | Links | BibTeX | Tags: Computer Science - Information Retrieval, Computer Science - Information Theory, Computer Science - Machine Learning, Computer Science - Sound, Electrical Engineering and Systems Science - Audio and Speech Processing
2025
@inproceedings{watcharasupat_uncertainty_2025,
  title         = {Uncertainty Estimation in the Real World: A Study on Music Emotion Recognition},
  author        = {Watcharasupat, Karn N. and Ding, Yiwei and Ma, T. Aleksandra and Seshadri, Pavan and Lerch, Alexander},
  url           = {http://arxiv.org/abs/2501.11570},
  doi           = {10.48550/arXiv.2501.11570},
  eprint        = {2501.11570},
  archiveprefix = {arXiv},
  year          = {2025},
  date          = {2025-01-01},
  urldate       = {2025-01-30},
  booktitle     = {Proceedings of the European Conference on Information Retrieval (ECIR)},
  address       = {Lucca, Italy},
  abstract      = {Any data annotation for subjective tasks shows potential variations between individuals. This is particularly true for annotations of emotional responses to musical stimuli. While older approaches to music emotion recognition systems frequently addressed this uncertainty problem through probabilistic modeling, modern systems based on neural networks tend to ignore the variability and focus only on predicting central tendencies of human subjective responses. In this work, we explore several methods for estimating not only the central tendencies of the subjective responses to a musical stimulus, but also for estimating the uncertainty associated with these responses. In particular, we investigate probabilistic loss functions and inference-time random sampling. Experimental results indicate that while the modeling of the central tendencies is achievable, modeling of the uncertainty in subjective responses proves significantly more challenging with currently available approaches even when empirical estimates of variations in the responses are available.},
  keywords      = {Computer Science - Artificial Intelligence, Computer Science - Information Retrieval, Computer Science - Machine Learning, Computer Science - Sound, Electrical Engineering and Systems Science - Audio and Speech Processing},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}
2024
@inproceedings{kim_towards_2024,
  title         = {Towards Robust Transcription: Exploring Noise Injection Strategies for Training Data Augmentation},
  author        = {Kim, Yonghyun and Lerch, Alexander},
  url           = {http://arxiv.org/abs/2410.14122},
  doi           = {10.48550/arXiv.2410.14122},
  eprint        = {2410.14122},
  archiveprefix = {arXiv},
  year          = {2024},
  date          = {2024-10-01},
  urldate       = {2024-10-25},
  booktitle     = {Late Breaking Demo (Extended Abstract), Proceedings of the International Society for Music Information Retrieval Conference (ISMIR)},
  address       = {San Francisco},
  abstract      = {Recent advancements in Automatic Piano Transcription (APT) have significantly improved system performance, but the impact of noisy environments on the system performance remains largely unexplored. This study investigates the impact of white noise at various Signal-to-Noise Ratio (SNR) levels on state-of-the-art APT models and evaluates the performance of the Onsets and Frames model when trained on noise-augmented data. We hope this research provides valuable insights as preliminary work toward developing transcription models that maintain consistent performance across a range of acoustic conditions.},
  keywords      = {Computer Science - Artificial Intelligence, Computer Science - Information Retrieval, Computer Science - Machine Learning, Computer Science - Sound, Electrical Engineering and Systems Science - Audio and Speech Processing},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}
@inproceedings{ma_music_2024,
  title         = {Music auto-tagging in the long tail: A few-shot approach},
  author        = {Ma, T. Aleksandra and Lerch, Alexander},
  url           = {http://arxiv.org/abs/2409.07730},
  doi           = {10.48550/arXiv.2409.07730},
  eprint        = {2409.07730},
  archiveprefix = {arXiv},
  year          = {2024},
  date          = {2024-09-01},
  urldate       = {2024-09-13},
  booktitle     = {Proceedings of the AES Convention},
  address       = {New York},
  abstract      = {In the realm of digital music, using tags to efficiently organize and retrieve music from extensive databases is crucial for music catalog owners. Human tagging by experts is labor-intensive but mostly accurate, whereas automatic tagging through supervised learning has approached satisfying accuracy but is restricted to a predefined set of training tags. Few-shot learning offers a viable solution to expand beyond this small set of predefined tags by enabling models to learn from only a few human-provided examples to understand tag meanings and subsequently apply these tags autonomously. We propose to integrate few-shot learning methodology into multi-label music auto-tagging by using features from pre-trained models as inputs to a lightweight linear classifier, also known as a linear probe. We investigate different popular pre-trained features, as well as different few-shot parametrizations with varying numbers of classes and samples per class. Our experiments demonstrate that a simple model with pre-trained features can achieve performance close to state-of-the-art models while using significantly less training data, such as 20 samples per tag. Additionally, our linear probe performs competitively with leading models when trained on the entire training dataset. The results show that this transfer learning-based few-shot approach could effectively address the issue of automatically assigning long-tail tags with only limited labeled data.},
  keywords      = {Computer Science - Information Retrieval, Computer Science - Machine Learning, Computer Science - Sound, Electrical Engineering and Systems Science - Audio and Speech Processing, H.3.3},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}
@inproceedings{watcharasupat_stem-agnostic_2024,
  title         = {A Stem-Agnostic Single-Decoder System for Music Source Separation Beyond Four Stems},
  author        = {Watcharasupat, Karn N. and Lerch, Alexander},
  url           = {http://arxiv.org/abs/2406.18747},
  doi           = {10.48550/arXiv.2406.18747},
  eprint        = {2406.18747},
  archiveprefix = {arXiv},
  year          = {2024},
  date          = {2024-06-01},
  urldate       = {2024-08-08},
  booktitle     = {Proceedings of the International Society for Music Information Retrieval Conference (ISMIR)},
  address       = {San Francisco},
  abstract      = {Despite significant recent progress across multiple subtasks of audio source separation, few music source separation systems support separation beyond the four-stem vocals, drums, bass, and other (VDBO) setup. Of the very few current systems that support source separation beyond this setup, most continue to rely on an inflexible decoder setup that can only support a fixed pre-defined set of stems. Increasing stem support in these inflexible systems correspondingly requires increasing computational complexity, rendering extensions of these systems computationally infeasible for long-tail instruments. In this work, we propose Banquet, a system that allows source separation of multiple stems using just one decoder. A bandsplit source separation model is extended to work in a query-based setup in tandem with a music instrument recognition PaSST model. On the MoisesDB dataset, Banquet, at only 24.9 M trainable parameters, approached the performance level of the significantly more complex 6-stem Hybrid Transformer Demucs on VDBO stems and outperformed it on guitar and piano. The query-based setup allows for the separation of narrow instrument classes such as clean acoustic guitars, and can be successfully applied to the extraction of less common stems such as reeds and organs. Implementation is available at https://github.com/kwatcharasupat/query-bandit.},
  keywords      = {Computer Science - Artificial Intelligence, Computer Science - Information Retrieval, Computer Science - Machine Learning, Computer Science - Sound, Electrical Engineering and Systems Science - Audio and Speech Processing},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}
2023
@inproceedings{ding_audio_2023,
  title         = {Audio Embeddings as Teachers for Music Classification},
  author        = {Ding, Yiwei and Lerch, Alexander},
  url           = {http://arxiv.org/abs/2306.17424},
  doi           = {10.48550/arXiv.2306.17424},
  eprint        = {2306.17424},
  archiveprefix = {arXiv},
  year          = {2023},
  date          = {2023-06-01},
  urldate       = {2023-06-01},
  booktitle     = {Proceedings of the International Society for Music Information Retrieval Conference (ISMIR)},
  address       = {Milan, Italy},
  abstract      = {Music classification has been one of the most popular tasks in the field of music information retrieval. With the development of deep learning models, the last decade has seen impressive improvements in a wide range of classification tasks. However, the increasing model complexity makes both training and inference computationally expensive. In this paper, we integrate the ideas of transfer learning and feature-based knowledge distillation and systematically investigate using pre-trained audio embeddings as teachers to guide the training of low-complexity student networks. By regularizing the feature space of the student networks with the pre-trained embeddings, the knowledge in the teacher embeddings can be transferred to the students. We use various pre-trained audio embeddings and test the effectiveness of the method on the tasks of musical instrument classification and music auto-tagging. Results show that our method significantly improves the results in comparison to the identical model trained without the teacher's knowledge. This technique can also be combined with classical knowledge distillation approaches to further improve the model's performance.},
  keywords      = {Computer Science - Information Retrieval, Computer Science - Sound, Electrical Engineering and Systems Science - Audio and Speech Processing},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}
2021
@inproceedings{watcharasupat_evaluation_2021,
  title         = {Evaluation of Latent Space Disentanglement in the Presence of Interdependent Attributes},
  author        = {Watcharasupat, Karn N. and Lerch, Alexander},
  url           = {http://arxiv.org/abs/2110.05587},
  doi           = {10.48550/arXiv.2110.05587},
  eprint        = {2110.05587},
  archiveprefix = {arXiv},
  year          = {2021},
  date          = {2021-10-01},
  urldate       = {2021-11-11},
  booktitle     = {Late Breaking Demo (Extended Abstract), Proceedings of the International Society for Music Information Retrieval Conference (ISMIR)},
  address       = {Online},
  abstract      = {Controllable music generation with deep generative models has become increasingly reliant on disentanglement learning techniques. However, current disentanglement metrics, such as mutual information gap (MIG), are often inadequate and misleading when used for evaluating latent representations in the presence of interdependent semantic attributes often encountered in real-world music datasets. In this work, we propose a dependency-aware information metric as a drop-in replacement for MIG that accounts for the inherent relationship between semantic attributes.},
  keywords      = {Computer Science - Information Retrieval, Computer Science - Information Theory, Computer Science - Machine Learning, Computer Science - Sound, Electrical Engineering and Systems Science - Audio and Speech Processing},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}
publications
Uncertainty Estimation in the Real World: A Study on Music Emotion Recognition Proceedings Article In: Proceedings of the European Conference on Information Retrieval (ECIR), arXiv, Lucca, Italy, 2025. Towards Robust Transcription: Exploring Noise Injection Strategies for Training Data Augmentation Proceedings Article In: Late Breaking Demo (Extended Abstract), Proceedings of the International Society for Music Information Retrieval Conference (ISMIR), arXiv, San Francisco, 2024. Music auto-tagging in the long tail: A few-shot approach Proceedings Article In: Proceedings of the AES Convention, New York, 2024. A Stem-Agnostic Single-Decoder System for Music Source Separation Beyond Four Stems Proceedings Article In: Proceedings of the International Society for Music Information Retrieval Conference (ISMIR), San Francisco, 2024. Audio Embeddings as Teachers for Music Classification Proceedings Article In: Proceedings of the International Society for Music Information Retrieval Conference (ISMIR), Milan, Italy, 2023. Evaluation of Latent Space Disentanglement in the Presence of Interdependent Attributes Proceedings Article In: Late Breaking Demo (Extended Abstract), Proceedings of the International Society for Music Information Retrieval Conference (ISMIR), Online, 2021.
2025
2024
2023
2021