How do I animate this image to match the BPM in p5.js?
So I'm using p5.js for a class and I'm pretty lost because I don't really understand it. How can I animate this image to match the sound? I tried frequency analysis, but I don't know how to apply it to the image. I want to animate it so that it pulses like a heart, but based on the BPM of the sound I put in my sketch. Here is the sketch + image + sound:
https://editor.p5js.org/FilipaRita/sketches/cUG6qNhIR
Answer
Actually finding the BPM of an entire piece of music is a bit more involved (see this sound.stackexchange.com question), but if you just want to detect beats in real time, I think you can put together something that works. Here is a visualization that I think will help you understand the data returned by fft.analyze():
const avgWindow = 20;
const threshold = 0.4;
let song;
let fft;
let beat;
let lastPeak;

function preload() {
  song = loadSound("https://www.paulwheeler.us/files/metronome.wav");
}

function setup() {
  createCanvas(400, 400);
  fft = new p5.FFT();
  song.loop();
  beat = millis();
}

function draw() {
  // Pulse white on the beat, then fade out with an inverse cube curve
  background(map(1 / pow((millis() - beat) / 1000 + 1, 3), 1, 0, 255, 100));
  drawSpectrumGraph(0, 0, width, height);
}

let i = 0;

// Graphing code adapted from https://jankozeluh.g6.cz/index.html by Jan Koželuh
function drawSpectrumGraph(left, top, w, h) {
  let spectrum = fft.analyze();

  stroke('limegreen');
  fill('darkgreen');
  strokeWeight(1);

  beginShape();
  vertex(left, top + h);

  let peak = 0;
  // Compute a running average of values so that very localized
  // energy spikes don't trigger a beat on their own.
  let runningAvg = 0;
  for (let i = 0; i < spectrum.length; i++) {
    vertex(
      //left + map(i, 0, spectrum.length, 0, w),
      // Distribute the spectrum values on a logarithmic scale,
      // because the higher you go in the spectrum, the larger the
      // change in frequency required for the same perceptible
      // difference in tone.
      left + map(log(i), 0, log(spectrum.length), 0, w),
      // Spectrum values range from 0 to 255
      top + map(spectrum[i], 0, 255, h, 0)
    );
    runningAvg += spectrum[i] / avgWindow;
    if (i >= avgWindow) {
      // Drop the value that just left the window so the average
      // only covers the most recent avgWindow bins.
      runningAvg -= spectrum[i - avgWindow] / avgWindow;
    }
    if (runningAvg > peak) {
      peak = runningAvg;
    }
  }
  // Any time there is a sudden increase in peak energy, call that a beat
  if (peak > lastPeak * (1 + threshold)) {
    // print(`tick ${++i}`);
    beat = millis();
  }
  lastPeak = peak;

  vertex(left + w, top + h);
  endShape(CLOSE);

  // This is the highest frequency covered by the FFT (the Nyquist frequency)
  let nyquist = 22050;

  // Get the spectral centroid (a value in Hz)
  let centroid = fft.getCentroid();

  // The mean_freq_index calculation is for the display:
  // centroid frequency / Hz per bucket
  let mean_freq_index = centroid / (nyquist / spectrum.length);

  stroke('red');
  // Convert the index to an x position using the same logarithmic x axis
  let cx = map(log(mean_freq_index), 0, log(spectrum.length), 0, width);
  line(cx, 0, cx, h);
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.3.1/p5.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.3.1/addons/p5.sound.min.js"></script>
Hopefully this commented code helps you understand the data returned by fft.analyze(), and you can use it as a starting point to implement the effect you are looking for.
Disclaimer: I have experience with p5.js, but I am not an audio expert, so there is certainly a better way to do this. Also, while this approach works for this simple audio file, there is a good chance it will fall apart on real music or in a real-world environment.
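To actually animate your image, the only thing you need from the sketch above is the `beat` timestamp that drawSpectrumGraph() keeps updating. As a rough, untested sketch of the idea (the heart.png file name and the 30% scale amount are placeholders, not assets from your project), you could load the image in preload() and change draw() to scale it by the same fading pulse:

let heart;

function preload() {
  song = loadSound("https://www.paulwheeler.us/files/metronome.wav");
  // Placeholder file name; load whatever image is in your own sketch
  heart = loadImage("heart.png");
}

function draw() {
  background(0);
  // Still run the beat detection; this is what updates the `beat` timestamp
  // (you could also factor the detection out of the drawing code)
  drawSpectrumGraph(0, 0, width, height);
  // Same inverse cube falloff used for the background flash above:
  // 1 right on the beat, decaying toward 0 afterwards
  let pulse = 1 / pow((millis() - beat) / 1000 + 1, 3);
  // Grow the image by up to 30% on each beat
  let s = 1 + 0.3 * pulse;
  imageMode(CENTER);
  image(heart, width / 2, height / 2, heart.width * s, heart.height * s);
}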
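For what it's worth, p5.sound also ships a built-in p5.PeakDetect class that does roughly this job for you. I haven't tuned it for your sound, so treat the following as a sketch of how it is typically wired up (default constructor arguments) rather than a drop-in answer:

let song;
let fft;
let peakDetect;
let beat = 0;

function preload() {
  song = loadSound("https://www.paulwheeler.us/files/metronome.wav");
}

function setup() {
  createCanvas(400, 400);
  fft = new p5.FFT();
  // Optionally pass (lowFreq, highFreq, threshold, framesPerPeak) to tune it
  peakDetect = new p5.PeakDetect();
  song.loop();
}

function draw() {
  // PeakDetect reads energy levels from the FFT, so analyze() must run first
  fft.analyze();
  peakDetect.update(fft);
  if (peakDetect.isDetected) {
    beat = millis();
  }
  // Same white flash that fades out after each detected beat
  background(map(1 / pow((millis() - beat) / 1000 + 1, 3), 1, 0, 255, 100));
}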