Frequency-Domain Features: Fbank

Fbank (filter bank) is a front-end feature extraction method that processes audio in a way that roughly mimics the human ear, which can improve speech-recognition performance. Its computation pipeline is almost identical to that of the spectrogram; the only difference is the added Mel filterbank, which brings the resulting features closer to human auditory perception. For background on the Mel filterbank, see https://mp.weixin.qq.com/s/pGwO_27x8ddQF55wTSQlmA. The following walks through how Fbank features are computed.
• Pre-emphasis
The power spectrum of a speech signal falls off as frequency increases, so most of the energy is concentrated in the low-frequency band and the signal-to-noise ratio at high frequencies is poor. A first-order high-pass filter is therefore applied to boost the high-frequency part of the signal: y[n] = x[n] - coeff * x[n-1], where coeff is typically between 0.95 and 0.98.

def preemphasis(signal, coeff=0.95):
    # y[n] = x[n] - coeff * x[n-1]; the first sample is kept as-is
    return np.append(signal[0], signal[1:] - coeff * signal[:-1])
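
To see why this is a high-pass filter, one can look at the magnitude response of H(z) = 1 - coeff * z^-1. The short sketch below is only an illustration and is not part of the original pipeline:

import numpy as np
# Magnitude response of the pre-emphasis filter, sampled from 0 up to Nyquist
w = np.linspace(0, np.pi, 512)
H = np.abs(1 - 0.95 * np.exp(-1j * w))
# |H| grows from 0.05 at DC to 1.95 near Nyquist, i.e. the high frequencies
# are boosted by roughly 30 dB relative to the low frequencies.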

• Framing and windowing
Speech is only approximately stationary over short intervals, so the pre-emphasized signal is split into overlapping frames and each frame is multiplied by a window function. For the details of framing, see https://mp.weixin.qq.com/s/PKBZgFXicNHghb39iyPfow.

def frame_sig(sig, frame_len, frame_step, win_func):
    '''
    :param sig: input speech signal
    :param frame_len: frame length in samples
    :param frame_step: frame shift (hop) in samples
    :param win_func: window function
    :return: array of frames, num_frames * frame_len
    '''
    slen = len(sig)
    if slen <= frame_len:
        num_frames = 1
    else:
        # np.ceil(): round up
        num_frames = 1 + int(np.ceil((slen - frame_len) / frame_step))
    padlen = int((num_frames - 1) * frame_step + frame_len)
    # Zero-pad the signal so that (slen - frame_len) / frame_step divides evenly
    zeros = np.zeros((padlen - slen,))
    padSig = np.concatenate((sig, zeros))
    # Index matrix: row i holds the sample indices of frame i
    indices = np.tile(np.arange(0, frame_len), (num_frames, 1)) + np.tile(np.arange(0, num_frames * frame_step, frame_step), (frame_len, 1)).T
    indices = np.array(indices, dtype=np.int32)
    frames = padSig[indices]
    # Apply the window function to every frame
    win = np.tile(win_func(frame_len), (num_frames, 1))
    return frames * win
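
A quick sanity check of the framing function; the sampling rate and frame parameters below are illustrative and not taken from the original post:

import numpy as np
# One second of 16 kHz audio, 25 ms frames (400 samples), 10 ms hop (160 samples)
sig = np.random.randn(16000)
frames = frame_sig(sig, frame_len=400, frame_step=160, win_func=np.hanning)
print(frames.shape)  # (99, 400): 1 + ceil((16000 - 400) / 160) frames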

• FFT
Apply the Fourier transform to each windowed frame. np.fft.rfft operates along the last axis and returns NFFT/2 + 1 complex spectral bins per frame.

complex_spec = np.fft.rfft(frames, NFFT)
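
Each of these bins corresponds to a physical frequency of k * samplerate / NFFT Hz, which is the mapping the Mel filterbank construction below relies on. A small sketch (the NFFT and samplerate values are just examples):

import numpy as np
NFFT, samplerate = 512, 16000
# Center frequency of each rfft bin, from 0 Hz up to the Nyquist frequency
bin_freqs = np.fft.rfftfreq(NFFT, d=1.0 / samplerate)
print(bin_freqs[:4], bin_freqs[-1])  # [0. 31.25 62.5 93.75] 8000.0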

• Squared magnitude
Take the squared magnitude of the complex spectrum to obtain the power spectrum. In the complete code below, this step is additionally normalized by 1/NFFT inside the pow_spec helper.

np.square(np.abs(complex_spec))

• Mel filterbank
The construction of the Mel filterbank was covered in an earlier section and is not repeated here; see https://mp.weixin.qq.com/s/pGwO_27x8ddQF55wTSQlmA for details. In short, nfilt triangular filters are spaced uniformly on the Mel scale between lowfreq and highfreq.
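
The construction relies on the Hz-to-Mel conversion and its inverse, which are defined in the complete code further below:

def hz2mel(hz):
    return 2595 * np.log10(1 + hz / 700.)

def mel2hz(mel):
    return 700 * (10 ** (mel / 2595.0) - 1)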

def filterbank(nfilt=40, nfft=512, samplerate=16000, lowfreq=20, highfreq=None):
    low_freq = lowfreq
    if highfreq is None:
        highfreq = samplerate // 2
    # Place nfilt + 2 points uniformly on the Mel scale between lowfreq and highfreq
    low_mel = hz2mel(low_freq)
    high_mel = hz2mel(highfreq)
    mel_points = np.linspace(low_mel, high_mel, nfilt + 2)
    # Convert the Mel points back to Hz, then to FFT bin indices
    binf = np.floor((nfft + 1) * mel2hz(mel_points) / samplerate)
    fbank = np.zeros([nfilt, int(nfft / 2 + 1)])
    # Filter j is a triangle rising from binf[j] to binf[j+1], falling to binf[j+2]
    for indexj in range(0, nfilt):
        left = binf[indexj]
        center = binf[indexj + 1]
        right = binf[indexj + 2]
        for indexi in range(int(left), int(center)):
            fbank[indexj, indexi] = (indexi - left) / (center - left)
        for indexi in range(int(center), int(right)):
            fbank[indexj, indexi] = (right - indexi) / (right - center)
    return fbank
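
Applying the filterbank to the power spectrum is then a single matrix product. The shapes below assume the default arguments (nfilt=40, nfft=512):

fb = filterbank(nfilt=40, nfft=512, samplerate=16000)
print(fb.shape)  # (40, 257): one row per triangular filter
# features has shape (num_frames, 257); the Fbank energies are
# feature = np.dot(features, fb.T)   # -> (num_frames, 40)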

• Log power
Finally, compress the filterbank energies with a logarithm; here librosa.power_to_db is used to convert the power values to decibels.

librosa.power_to_db(feature.T)
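
For readers who prefer not to depend on librosa for this step, roughly the same result can be obtained with plain NumPy (this ignores power_to_db's optional top_db clipping):

log_fbank = 10 * np.log10(np.maximum(feature.T, 1e-10))  # dB scale, floored to avoid log(0)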

This yields the Fbank features. The complete code is as follows:

import numpy as np
import soundfile as sf
import python_speech_features as psf
import librosa
import librosa.display
import matplotlib.pyplot as plt
def frame_sig(sig, frame_len, frame_step, win_func):
    '''
    :param sig: input speech signal
    :param frame_len: frame length in samples
    :param frame_step: frame shift (hop) in samples
    :param win_func: window function
    :return: array of frames, num_frames * frame_len
    '''
    slen = len(sig)
    if slen <= frame_len:
        num_frames = 1
    else:
        # np.ceil(): round up
        num_frames = 1 + int(np.ceil((slen - frame_len) / frame_step))
    padlen = int( (num_frames - 1) * frame_step + frame_len)
    # Zero-pad the signal so that (slen - frame_len) / frame_step divides evenly
    zeros = np.zeros((padlen - slen,))
    padSig = np.concatenate((sig, zeros))
    indices = np.tile(np.arange(0, frame_len), (num_frames, 1)) + np.tile(np.arange(0, num_frames*frame_step, frame_step), (frame_len, 1)).T
    indices = np.array(indices, dtype=np.int32)
    frames = padSig[indices]
    win = np.tile(win_func(frame_len), (num_frames, 1))
    return frames * win
def preemphasis(signal, coeff=0.95):
    return np.append(signal[0], signal[1:] - coeff * signal[:-1])
def pow_spec(frames, NFFT):
    complex_spec = np.fft.rfft(frames, NFFT)
    return 1 / NFFT * np.square(np.abs(complex_spec))
def hz2mel(hz):
    return 2595 * np.log10(1 + hz / 700.)
def mel2hz(mel):
    return 700 * (10 ** (mel / 2595.0) - 1)
def filterbank(nfilt=40, nfft=512, samplerate=16000, lowfreq=20, highfreq=None):
    low_freq = lowfreq
    if highfreq is None:
        highfreq = samplerate // 2
    low_mel = hz2mel(low_freq)
    high_mel = hz2mel(highfreq)
    mel_points = np.linspace(low_mel, high_mel, nfilt + 2)
    binf = np.floor((nfft + 1) * mel2hz(mel_points) / samplerate)
    fbank = np.zeros([nfilt, int(nfft / 2 + 1)])
    for indexj in range(0, nfilt):
        left = binf[indexj]
        center = binf[indexj + 1]
        right = binf[indexj + 2]
        for indexi in range(int(left), int(center)):
            fbank[indexj, indexi] = (indexi - left) / (center - left)
        for indexi in range(int(center), int(right)):
            fbank[indexj, indexi] = (right - indexi) / (right - center)
    return fbank
y, sr = sf.read('q1.wav')
# Pre-emphasis
y = preemphasis(y, coeff=0.98)
# Framing and windowing
frames = frame_sig(y, frame_len=2048, frame_step=512, win_func=np.hanning)
# Power spectrum
features = pow_spec(frames, NFFT=2048)
# Mel filterbank and Fbank energies
nfilt = 26
nfft = 2048
fb = filterbank(nfilt, nfft, sr, lowfreq=20, highfreq=sr // 2)
feature = np.dot(features, fb.T)
# Log power and display
librosa.display.specshow(librosa.power_to_db(feature.T), sr=sr, x_axis='time', y_axis='linear')
plt.title('Fbank')
plt.colorbar(format='%+2.0f dB')
plt.tight_layout()
plt.show()

Original article: https://www.cnblogs.com/tingweichen/p/14664789.html