Python语音信号处理:时域波形、频谱和语谱图可视化
```python
# -*- coding: utf-8 -*-
import librosa
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from scipy.fft import fft
import librosa.display


plt.figure(dpi=600)  # 将显示的所有图分辨率调高
matplotlib.rc('font', family='SimHei')  # 显示中文
matplotlib.rcParams['axes.unicode_minus'] = False  # 显示符号


def displayWaveform(sample1, sample2):  # 显示语音时域波形
    '''
    display waveform of a given speech sample
    :param sample_name: speech sample name
    :param fs: sample frequency
    :return:
    '''
    samples1, sr1 = librosa.load(sample1, sr=16000)
    samples2, sr2 = librosa.load(sample2, sr=16000)
    # samples = samples[6000:16000]

    # min_len = min(len(samples1), len(samples2))
    # samples1 = samples1[:min_len]
    # samples2 = samples2[:min_len]

    # print(len(samples1), sr1)
    # print(len(samples2), sr2)
    # time = np.arange(0, min_len) * (1.0 / sr1)

    print(len(samples1), sr1)
    print(len(samples2), sr2)
    time1 = np.arange(0, len(samples1)) * (1.0 / sr1)
    time2 = np.arange(0, len(samples2)) * (1.0 / sr2)

    plt.figure(figsize=(18, 8))
    ax1 = plt.subplot(211)
    plt.plot(time1, samples1)
    plt.title('原音频时域波形')
    plt.xlabel('时长(秒)')
    plt.ylabel('振幅')
    # plt.xlim(time1[0], time1[-1])  # 设置横坐标范围

    ax2 = plt.subplot(212, sharex=ax1, sharey=ax1)
    # plt.ylim(-0.4, 0.4)
    # plt.yticks(np.arange(-0.4, 0.5, 0.2))
    plt.plot(time2, samples2)
    plt.title('降噪后音频时域波形')
    plt.xlabel('时长(秒)')
    plt.ylabel('振幅')
    # plt.xlim(time2[0], time2[-1])  # 设置横坐标范围
    plt.subplots_adjust(hspace=0.5)  # 调整子图间距
    # plt.savefig('your dir/语音信号时域波形图', dpi=600)
    plt.show()


def displaySpectrum(sample1, sample2):  # 显示语音频域谱线
    x1, sr1 = librosa.load(sample1, sr=16000)
    x2, sr2 = librosa.load(sample2, sr=16000)
    print(len(x1), len(x2))
    # ft = librosa.stft(x)
    # magnitude = np.abs(ft)  # 对fft的结果直接取模(取绝对值),得到幅度magnitude
    # frequency = np.angle(ft)  # (0, 16000, 121632)

    ft1 = fft(x1)
    ft2 = fft(x2)
    magnitude1 = np.absolute(ft1)  # 对fft的结果直接取模(取绝对值),得到幅度magnitude
    magnitude2 = np.absolute(ft2)  # 对fft的结果直接取模(取绝对值),得到幅度magnitude
    frequency1 = np.linspace(0, sr1, len(magnitude1))  # (0, 16000, 121632)
    frequency2 = np.linspace(0, sr2, len(magnitude2))  # (0, 16000, 121632)

    print(len(magnitude1), type(magnitude1), np.max(magnitude1), np.min(magnitude1))
    print(len(frequency1), type(frequency1), np.max(frequency1), np.min(frequency1))
    print(len(magnitude2), type(magnitude2), np.max(magnitude2), np.min(magnitude2))
    print(len(frequency2), type(frequency2), np.max(frequency2), np.min(frequency2))

    # plot spectrum,限定[:40000]
    plt.figure(figsize=(18, 8))
    plt.subplot(211)
    plt.plot(frequency1[:40000], magnitude1[:40000])  # magnitude spectrum
    plt.title('原音频频域谱线')
    plt.xlabel('频率(赫兹)')
    plt.ylabel('幅度')
    plt.subplot(212)
    plt.plot(frequency2[:40000], magnitude2[:40000])  # magnitude spectrum
    # plt.ylim(0, 1000)
    plt.title('降噪后音频频域谱线')
    plt.xlabel('频率(赫兹)')
    plt.ylabel('幅度')
    plt.subplots_adjust(hspace=0.5)  # 调整子图间距
    # plt.savefig('your dir/语音信号频谱图', dpi=600)
    plt.show()


def displaySpectrogram(sample1, sample2):
    x1, sr1 = librosa.load(sample1, sr=16000)
    x2, sr2 = librosa.load(sample2, sr=16000)

    # compute power spectrogram with stft(short-time fourier transform):
    # 基于stft,计算power spectrogram
    spectrogram1 = librosa.amplitude_to_db(librosa.stft(x1))
    spectrogram2 = librosa.amplitude_to_db(librosa.stft(x2))

    # show
    plt.figure(figsize=(18, 8))
    plt.subplot(211)
    librosa.display.specshow(spectrogram1, y_axis='log')
    plt.colorbar(format='%+2.0f dB')
    plt.title('原音频对数谱图')
    plt.xlabel('时长(秒)')
    plt.ylabel('频率(赫兹)')
    plt.subplot(212)
    librosa.display.specshow(spectrogram2, y_axis='log')
    plt.colorbar(format='%+2.0f dB')
    plt.title('降噪后音频对数谱图')
    plt.xlabel('时长(秒)')
    plt.ylabel('频率(赫兹)')
    plt.subplots_adjust(hspace=0.5)  # 调整子图间距
    plt.show()


if __name__ == '__main__':
    sample1 = r'p306_023.wav'
    sample2 = r'enhanced_p306_023G_88_0.2559.wav'
    displayWaveform(sample1, sample2)
    displaySpectrum(sample1, sample2)
    displaySpectrogram(sample1, sample2)
```

出现报错:

```
Traceback (most recent call last):
  File "D:\graduation_design\subsequent_processing.py", line 130, in <module>
    displayWaveform(sample1, sample2)
  File "D:\graduation_design\subsequent_processing.py", line 50, in displayWaveform
    plt.subplot(212, sharex=True, sharey=True)
  File "D:\python\lib\site-packages\matplotlib\pyplot.py", line 1030, in subplot
    a = fig.add_subplot(*args, **kwargs)
  File "D:\python\lib\site-packages\matplotlib\figure.py", line 1419, in add_subplot
    a = subplot_class_factory(projection_class)(self, *args, **kwargs)
  File "D:\python\lib\site-packages\matplotlib\axes\_subplots.py", line 76, in __init__
    self._axes_class.__init__(self, fig, self.figbox, **kwargs)
  File "D:\python\lib\site-packages\matplotlib\axes\_base.py", line 435, in __init__
    self._shared_x_axes.join(self, sharex)
  File "D:\python\lib\site-packages\matplotlib\cbook\__init__.py", line 849, in join
    set_b = mapping.get(weakref.ref(arg), [weakref.ref(arg)])
TypeError: cannot create weak reference to 'bool' object
```

怎么解决?

内容:这个报错是因为在使用 `plt.subplot` 时,`sharex` 和 `sharey` 参数被设置成了 `True` 或 `False`,而它们应该是一个 `AxesSubplot` 对象。可以尝试将这些参数去掉,或者将它们设置为一个 `AxesSubplot` 对象。例如:

```python
plt.subplot(212)
plt.plot(time2, samples2)
plt.title('降噪后音频时域波形')
plt.xlabel('时长(秒)')
plt.ylabel('振幅')
```

或者:

```python
ax1 = plt.subplot(211)
plt.plot(time1, samples1)
plt.title('原音频时域波形')
plt.xlabel('时长(秒)')
plt.ylabel('振幅')

ax2 = plt.subplot(212, sharex=ax1, sharey=ax1)
plt.plot(time2, samples2)
plt.title('降噪后音频时域波形')
plt.xlabel('时长(秒)')
plt.ylabel('振幅')
```
原文地址: https://www.cveoy.top/t/topic/n7M8 著作权归作者所有。请勿转载和采集!