Music Visualization

Effect

[Demo: music visualization]

Implementation approach

  1. Get the audio data (see the minimal sketch of the analysis pipeline after this list)
  2. Draw with canvas
  3. Redraw the canvas whenever the audio data changes
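Steps 1 and 3 boil down to one Web Audio pipeline: an AnalyserNode is tapped off the audio element's source node, and getByteFrequencyData fills a Uint8Array with the current frequency magnitudes on every animation frame. Below is a minimal sketch of just that pipeline; the render callback is a placeholder for whatever step 2 draws.

js

// Minimal sketch of the analysis pipeline only; render() stands in for the canvas drawing below
const audioEl = document.querySelector("audio");
const render = (bins) => {
  /* step 2: draw `bins` onto the canvas; see draw() in the full script below */
};

audioEl.addEventListener(
  "play",
  () => {
    const audioCtx = new AudioContext();
    // A media element can be wrapped in a source node only once
    const source = audioCtx.createMediaElementSource(audioEl);
    const analyser = audioCtx.createAnalyser();
    analyser.fftSize = 256; // 128 frequency bins (fftSize / 2)
    source.connect(analyser); // tap the signal for analysis
    source.connect(audioCtx.destination); // keep routing sound to the speakers

    const bins = new Uint8Array(analyser.frequencyBinCount);
    const frame = () => {
      requestAnimationFrame(frame);
      analyser.getByteFrequencyData(bins); // current magnitude (0-255) per bin
      render(bins);
    };
    frame();
  },
  { once: true } // the pipeline only needs to be built once
);

The full page and script follow.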
html
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>Document</title>
    <link
      rel="shortcut icon"
      href="./thumb-1920-548850 (2).jpg"
      type="image/x-icon"
    />
    <style>
      body {
        height: 100vh;
        margin: 0 auto;
        background-color: #363535;
        position: relative;
      }
      canvas {
        position: absolute;
        top: 50%;
        left: 50%;
        transform: translate(-50%, -50%);
      }
      audio {
        position: absolute;
        bottom: 100px;
        right: 50%;
        transform: translate(50%, 0);
        width: 500px;
        height: 100px;
        background-color: #363535;
        border-radius: 20px;
      }
    </style>
  </head>
  <body>
    <canvas></canvas>
    <audio src="./喜欢你-G.E.M.邓紫棋.320.mp3" controls></audio>
    <script src="./index.js"></script>
  </body>
</html>
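Note that the canvas carries no width or height attributes: index.js sizes it at runtime, scaling the backing store by devicePixelRatio so the lines stay crisp on high-DPI screens, while the CSS only centers the canvas and the audio controls.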

js

const canvas = document.querySelector("canvas");
const ctx = canvas.getContext("2d");
const audio = document.querySelector("audio");

// Initialize the canvas size
const initCanvas = () => {
  // devicePixelRatio is the ratio of the device's physical pixel resolution to its
  // CSS pixel resolution; scaling by it keeps the drawing crisp on HiDPI screens
  const size = 600;
  canvas.width = size * devicePixelRatio;
  canvas.height = size * devicePixelRatio;
  canvas.style.width = canvas.style.height = `${size}px`;
};
initCanvas();

/**
 * Draw the spectrum from the audio data
 * @param {Array} data frequency-domain values
 * @param {number} maxVal maximum possible frequency value
 */
function draw(data, maxVal) {
  // Inner radius of the spectrum ring
  const r = canvas.width / 4 + 20 * devicePixelRatio;
  // Center point
  const center = canvas.width / 2;
  // Clear the canvas
  ctx.clearRect(0, 0, canvas.width, canvas.height);
  // Angle (and hue) step between neighboring bars
  const hslStep = 360 / (data.length - 1);
  // Maximum bar length
  const maxLen = canvas.width / 2 - r;
  // Minimum bar length
  const minLen = 2 * devicePixelRatio;
  for (let i = 0; i < data.length; i++) {
    ctx.beginPath();
    // Bar length for the current frequency value
    const len = Math.max((data[i] / maxVal) * maxLen, minLen);
    // Angle of the current bar, reused as its hue
    const rotate = hslStep * i;
    ctx.strokeStyle = `hsl(${rotate}deg, 65%, 65%)`;
    ctx.lineWidth = minLen;
    const rad = (rotate * Math.PI) / 180;
    // Start point on the inner ring, end point pushed outward by the bar length
    const startX = center + Math.cos(rad) * r;
    const startY = center + Math.sin(rad) * r;
    const endX = center + Math.cos(rad) * (r + len);
    const endY = center + Math.sin(rad) * (r + len);
    ctx.moveTo(startX, startY);
    ctx.lineTo(endX, endY);
    ctx.stroke();
  }
}

// Draw an empty ring before any audio plays
draw(new Array(256).fill(0), 255);

// Flag: has the audio context been created yet
let isInit = false;
// Analyser node
let analyser;
// Buffer that receives the analysis result
let buffer;

audio.onplay = () => {
  // The audio context only needs to be created once
  if (isInit) return;
  // Create the audio context
  createAudio();
};

const createAudio = () => {
  // Create the audio context
  const audioCtx = new AudioContext();
  // Create the source node from the <audio> element
  const source = audioCtx.createMediaElementSource(audio);
  // Create the analyser node
  analyser = audioCtx.createAnalyser();
  // FFT window size; the number of frequency bins is fftSize / 2
  analyser.fftSize = 256;
  // The buffer length is frequencyBinCount (fftSize / 2) -> an unsigned 8-bit typed array
  buffer = new Uint8Array(analyser.frequencyBinCount);
  // Tap the source into the analyser
  source.connect(analyser);
  // Also connect the source to the destination, i.e. the output device
  source.connect(audioCtx.destination);
  isInit = true;
};

const update = () => {
  requestAnimationFrame(update);
  // Nothing to analyse until the audio context exists
  if (!isInit) return;
  // Fetch the frequency-domain data
  analyser.getByteFrequencyData(buffer);
  // Post-process the data so the ring starts and ends smoothly
  const offset = Math.floor((buffer.length * 2) / 3);
  const data = new Array(offset * 2);
  for (let i = 0; i < offset; i++) {
    // Symmetric assignment: the first and second halves mirror each other,
    // so neighbouring bars change continuously around the circle
    data[i] = data[data.length - 1 - i] = buffer[i];
  }
  // Draw the spectrum
  draw(data, 255);
};
update();
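Two details are worth noting. update() starts its requestAnimationFrame loop right away, before any AudioContext exists, so it simply returns until isInit is set by the first play event; the draw(new Array(256).fill(0), 255) call is what renders the idle ring in the meantime. The mirroring inside update() is what makes the ring seamless: only roughly the first two thirds of the bins are used, and each value is written to both a position in the first half and its mirror in the second half, so the bars on either side of the 0°/360° seam always match. Also keep in mind that createMediaElementSource only yields real data for same-origin (or CORS-enabled) audio; a cross-origin file without CORS headers will analyse as silence.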