fix: use the audio device's channels, rather than the source's channels

yes, I know this is dumb, I knew something was wrong when I had to do it
that way, but I was dumb. Now... I'm still dumb, just slightly more
knowledgeable.
This commit is contained in:
2025-03-25 11:45:40 +01:00
parent 9d41a2a65b
commit 646629adc1

View File

@@ -187,7 +187,7 @@ audiodata audio_wav_load(audiodevice const* dev, char const* fpath) {
     // calculate the time in milliseconds of the audio fragment
     // by dividing the audio bytelength by the format's bitsize, by the audio device's channels and the audio device's frequency
-    audio.ms = (((1000 * audio.len) / (SDL_AUDIO_BITSIZE(dev->fmt) / 8)) / spec.channels / dev->freq);
+    audio.ms = (((1000 * audio.len) / (SDL_AUDIO_BITSIZE(dev->fmt) / 8)) / dev->channels / dev->freq);
     return audio;
 }