Music Visualization with Python

The nice thing about Python is the sheer number of libraries: whatever you need, someone has already built the wheel for it.

Today I want to share a music visualization program. What is music visualization? Something like this:
(image: music visualization demo)
Yes, exactly that thing. There is probably a proper term for it, but I cannot think of it, so let's just call it music visualization.
The basic idea is this: what the animation shows should be the distribution of the different frequencies in the music. So how do you get a song's frequency distribution? The first thing that comes to mind is the Fourier transform, which takes you from the time domain to the frequency domain; matplotlib can then redraw the frequency information on screen in real time. That is the idea, but how do you actually implement it? Fourier transforms, time domain, frequency domain... it sounds like a lot of work. Luckily, Python has plenty of libraries, and the fastest way to get to know one is to find someone else's working example and build on it. (This implementation is based on another blog post whose author built quite a few visualization styles; I took two of them to use here.)
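To make the idea concrete, here is a minimal sketch of that pipeline on a synthetic 1 kHz tone (this is my own illustration, not code from the original post; it uses np.fft.rfft for the one-sided spectrum, while the program below uses np.fft.fft and keeps only the first bins):

import numpy as np

fs = 44100                                 # sample rate in Hz
t = np.arange(int(0.02 * fs)) / fs         # one 20 ms window
y = np.sin(2 * np.pi * 1000 * t)           # 1 kHz test tone standing in for real audio
spectrum = np.abs(np.fft.rfft(y))          # magnitude of the one-sided spectrum
freqs = np.fft.rfftfreq(len(y), d=1 / fs)  # frequency of each FFT bin in Hz
print(freqs[np.argmax(spectrum)])          # the peak lands at 1000.0 Hz

That is really all the math the program needs; everything else is feeding windows of audio through it and redrawing the result.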
Import the libraries

import numpy as np
import pyaudio
from pydub import AudioSegment, effects
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

First, the part that reads the music. Note that you should convert your track to WAV format; I tried MP3 directly and it did not work for me (a conversion sketch follows the loading code below).

p = pyaudio.PyAudio()
sound = AudioSegment.from_file(file='Free-Converter.com-a1-86040504.wav')
left = sound.split_to_mono()[0]          # keep only the left channel
fs = left.frame_rate                     # sample rate
size = len(left.get_array_of_samples())  # total number of samples
channels = left.channels
stream = p.open(
    format=p.get_format_from_width(left.sample_width),
    channels=channels,
    rate=fs,
    output=True,  # playback-only stream
)
stream.start_stream()
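As promised above, if your source file is an MP3, one way to convert it is to let pydub itself (backed by ffmpeg) write a WAV copy first; the file names here are placeholders:

from pydub import AudioSegment

# hypothetical file names; decoding MP3 requires ffmpeg to be on the PATH
AudioSegment.from_file('song.mp3', format='mp3').export('song.wav', format='wav')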

Next, set up the figure

fig = plt.figure()
ax1 = fig.subplots()
ax1.set_ylim(0, 2)
ax1.set_axis_off()
window = int(0.02 * fs)  # number of samples in a 20 ms window
g_windows = window // 8  # how many FFT bins actually get drawn

f = np.linspace(20, 20 * 1000, g_windows)  # x positions for the spectrum line
t = np.linspace(0, 20, window)             # time axis, left over from the original time-domain line
lf1, = ax1.plot(f, np.zeros(g_windows), lw=1)
lf1.set_antialiased(True)
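A note on the f axis (my own observation, not from the original post): since the axes are hidden, np.linspace(20, 20000, g_windows) only controls the spacing of the points; it is not the real frequency of each bin. The actual bin frequencies for this window size can be checked like this, assuming a 44.1 kHz file:

import numpy as np

fs = 44100               # assuming a 44.1 kHz file
window = int(0.02 * fs)  # 882 samples per 20 ms window
g_windows = window // 8  # 110 bins are drawn
bin_freqs = np.fft.fftfreq(window, d=1 / fs)[:g_windows]
print(bin_freqs[1], bin_freqs[-1])  # bins are 50 Hz apart, so the plot covers roughly 0 to 5.5 kHz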

Then define the update function for the plot

color_grade = ['black', 'blue', 'yellow', 'red']

def update(frames):
    if stream.is_active():
        slice = left.get_sample_slice(frames, frames + window)  # next 20 ms of audio
        data = slice.raw_data
        stream.write(data)  # play it
        y = np.array(slice.get_array_of_samples()) / 30000  # normalize
        yft = np.abs(np.fft.fft(y)) / g_windows  # magnitude spectrum
        # color the line by how spread out the spectrum is in this window
        grade = int(max(yft[:g_windows]) - min(yft[:g_windows]))
        if 0 <= grade < len(color_grade):
            lf1.set_color(color_grade[grade])
        lf1.set_ydata(yft[:g_windows])
    return lf1,
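A side note on the division by 30000: assuming a standard 16-bit WAV, the samples are signed integers whose full-scale value is 2**15 = 32768, so 30000 is just a convenient round number that keeps y roughly inside [-1, 1]. A tiny illustration with made-up sample values:

import numpy as np

samples = np.array([0, 15000, -30000, 32767], dtype=np.int16)  # made-up 16-bit samples
print(samples / 30000)    # roughly within [-1, 1], which is all the plot needs
print(samples / 2 ** 15)  # the exact normalization, if you prefer it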

Finally, run it

ani = FuncAnimation(fig, update, frames=range(0, size, window), interval=0, blit=True)
plt.show()
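One small thing I would add after plt.show() returns (it is not in the original): release the audio device so the stream and the PyAudio instance are shut down cleanly.

# after the plot window is closed, release the audio device (my addition)
stream.stop_stream()
stream.close()
p.terminate()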

You can go look at the original blog post; the original program displays both the time domain and the frequency domain, while I dropped the time-domain plot and added a color-changing effect.
Oh, and you also need to download ffmpeg, unzip it, and add its bin directory to your system environment variables, since pydub relies on it.
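If pydub still complains that it cannot find a decoder, a quick way to check whether ffmpeg is actually visible from Python is:

import shutil

print(shutil.which('ffmpeg'))  # prints the executable's path, or None if it is not on the PATH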
Here is the full source code

#coding=utf-8
import numpy as np
import pyaudio
from pydub import AudioSegment, effects
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

# ------------------------------------ two-line mode
p = pyaudio.PyAudio()
sound = AudioSegment.from_file(file='Free-Converter.com-a1-86040504.wav')
left = sound.split_to_mono()[0]
fs = left.frame_rate
size = len(left.get_array_of_samples())
channels = left.channels
stream = p.open(
    format=p.get_format_from_width(left.sample_width),
    channels=channels,
    rate=fs,
    # input=True,
    output=True,
)
stream.start_stream()

fig = plt.figure()
# ax1, ax2 = fig.subplots(2, 1)
ax1 = fig.subplots()
ax1.set_ylim(0, 2)
# ax2.set_ylim(-1.5, 1.5)
ax1.set_axis_off()
# ax2.set_axis_off()
window = int(0.02 * fs)  # 20ms

g_windows = window // 8


f = np.linspace(20, 20 * 1000, g_windows)
t = np.linspace(0, 20, window)
lf1, = ax1.plot(f, np.zeros(g_windows), lw=1)
lf1.set_antialiased(True)
# lf1.set_fillstyle('left')
# lf1.set_drawstyle('steps-pre')
# lf2, = ax2.plot(t, np.zeros(window), lw=1)

color_grade = ['black','blue','yellow','red']
def update(frames):
    if stream.is_active():
        slice = left.get_sample_slice(frames, frames + window)
        data = slice.raw_data
        stream.write(data)
        y = np.array(slice.get_array_of_samples()) / 30000  # normalize
        yft = np.abs(np.fft.fft(y)) / (g_windows)
        # print('max',max(yft[:g_windows]),'min',min(yft[:g_windows]))
        # print(max(yft[:g_windows]) - min(yft[:g_windows]))
        # max =
        # min = min(yft[:g_windows])
        grade = int(max(yft[:g_windows]) - min(yft[:g_windows]))
        if 0 <= grade < len(color_grade):
            lf1.set_color(color_grade[grade])
        lf1.set_ydata(yft[:g_windows])
        # lf2.set_ydata(y)
    return lf1,
    # return lf1, lf2,


ani = FuncAnimation(fig, update, frames=range(0, size, window), interval=0, blit=True)
plt.show()

And here is a second visualization style

import matplotlib.pyplot as plt
from scipy.signal import detrend
import numpy as np
import pyaudio
import struct
import wave
from matplotlib.animation import FuncAnimation

chunk = 600
p = pyaudio.PyAudio()
wf = wave.open('Free-Converter.com-a1-86040504.wav')
stream = p.open(
    format=8,  # 8 is the value of pyaudio.paInt16
    channels=wf.getnchannels(),
    rate=wf.getframerate(),
    output=True,
)

fig = plt.figure()
ax = fig.gca()
ax.set_ylim(0, 1)
ax.set_axis_off()
lf = ax.stem(np.linspace(20, 20000, chunk), np.zeros(chunk), basefmt=':', use_line_collection=True)
lf.markerline.set_color([0.3, 0.4, 0.5, 0.6])  # RGBA color for the stem markers

def init():
    stream.start_stream()
    return lf


def update(frame):
    if stream.is_active():
        data = wf.readframes(chunk)
        stream.write(data)  # play this chunk
        # chunk frames * 4 bytes each (16-bit stereo) read as raw unsigned bytes
        data_int = struct.unpack(str(chunk * 4) + 'B', data)
        y_detrend = detrend(data_int)
        yft = np.abs(np.fft.fft(y_detrend))
        y_vals = yft[:chunk] / (chunk * chunk)
        # boost the bins above the midpoint so peaks stand out
        ind = np.where(y_vals > (np.max(y_vals) + np.min(y_vals)) / 2)
        y_vals[ind[0]] *= 4
        lf.markerline.set_ydata(y_vals)
    return lf


ani = FuncAnimation(fig, update, frames=None,
                    init_func=init, interval=0, blit=True)
plt.show()
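Two details in this second version are easy to miss: format=8 is simply the numeric value of pyaudio.paInt16, and the struct.unpack call reads each chunk of frames (4 bytes per frame, assuming a 16-bit stereo WAV like the one above) as raw unsigned bytes before detrending. A more explicit way to decode the same buffer, sketched here with a fake silent chunk standing in for wf.readframes(chunk):

import numpy as np
import pyaudio

print(pyaudio.paInt16)  # 8, the magic number passed as format above

fake_chunk = bytes(600 * 4)                          # 600 silent 16-bit stereo frames
samples = np.frombuffer(fake_chunk, dtype=np.int16)  # interleaved left/right samples
mono = samples.reshape(-1, 2).mean(axis=1)           # average the two channels down to mono
print(mono.shape)                                    # (600,)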

The two styles look like this:
(screenshots of the two visualization styles)
