Li, Xiaosha; Liu, Chun; Wang, Ziyu When Noise Lowers The Loss: Rethinking Likelihood-Based Evaluation in Music Large Language Models Proceedings Article In: Proceedings of the International Conference on Acoustics, Speech and Signal Processing (ICASSP), Institute of Electrical and Electronics Engineers (IEEE), Barcelona, Spain, 2026. Abstract | Links | BibTeX | Tags: Computer Science - Information Retrieval, Computer Science - Machine Learning, Computer Science - Sound, exposure bias, LLM evaluation, Loss, music LLMs, noise
2026
@inproceedings{li_noiselowersloss_2026,
  title         = {When Noise Lowers The Loss: Rethinking Likelihood-Based Evaluation in Music Large Language Models},
  author        = {Li, Xiaosha and Liu, Chun and Wang, Ziyu},
  url           = {https://arxiv.org/abs/2602.02738},
  eprint        = {2602.02738},
  archiveprefix = {arXiv},
  year          = {2026},
  date          = {2026-02-02},
  urldate       = {2026-02-02},
  booktitle     = {Proceedings of the International Conference on Acoustics, Speech and Signal Processing ({ICASSP})},
  publisher     = {Institute of Electrical and Electronics Engineers ({IEEE})},
  address       = {Barcelona, Spain},
  abstract      = {The rise of music large language models (LLMs) demands robust methods of evaluating output quality, especially in distinguishing high-quality compositions from "garbage music". Curiously, we observe that the standard cross-entropy loss \textendash a core training metric \textendash often decrease when models encounter systematically corrupted music, undermining its validity as a standalone quality indicator. To investigate this paradox, we introduce noise injection experiment, where controlled noise signal of varying lengths are injected into musical contexts. We hypothesize that a model's loss reacting positively to these perturbations, specifically a sharp increase ("Peak" area) for short injection, can serve as a proxy for its ability to discern musical integrity. Experiments with MusicGen models in the audio waveform domain confirm that Music LLMs respond more strongly to local, texture-level disruptions than to global semantic corruption. Beyond exposing this bias, our results highlight a new principle: the shape of the loss curve \textendash rather than its absolute value \textendash encodes critical information about the quality of the generated content (i.e., model behavior). We envision this profile-based evaluation as a label-free, model-intrinsic framework for assessing musical quality \textendash opening the door to more principled training objectives and sharper benchmarks.},
  keywords      = {Computer Science - Information Retrieval, Computer Science - Machine Learning, Computer Science - Sound, exposure bias, LLM evaluation, Loss, music LLMs, noise},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}
publications
When Noise Lowers The Loss: Rethinking Likelihood-Based Evaluation in Music Large Language Models Proceedings Article In: Proceedings of the International Conference on Acoustics, Speech and Signal Processing (ICASSP), Institute of Electrical and Electronics Engineers (IEEE), Barcelona, Spain, 2026.
2026