Speech separation model: MossFormer2
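The separation step itself runs MossFormer2. A minimal sketch of invoking it through the ModelScope speech-separation pipeline is shown below; the model ID `damo/speech_mossformer2_separation_temporal_8k`, the `output_pcm_list` result key, and the 8 kHz output rate follow the ModelScope model card as commonly published and should be treated as assumptions, not as part of this note's original pipeline.

import numpy as np
import soundfile as sf
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

# Assumed ModelScope model ID for the MossFormer2 8 kHz separation model
separation = pipeline(
    Tasks.speech_separation,
    model='damo/speech_mossformer2_separation_temporal_8k')

result = separation('mixed_input.wav')  # hypothetical input file
# 'output_pcm_list' (assumed key) holds one raw int16 PCM stream per separated speaker
for i, signal in enumerate(result['output_pcm_list']):
    sf.write(f'output_spk{i}.wav', np.frombuffer(signal, dtype=np.int16), 8000)

The per-speaker files produced this way (e.g. `..._spk0.wav`) are then fed to the short-time-energy segmentation below.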
- Compute short-time energy
import os

import numpy as np
import soundfile as sf


def compute_short_time_energy(audio: np.ndarray, frame_size: int, hop_size: int) -> np.ndarray:
    """Compute the short-time energy of an audio signal.

    The signal is split into frames of length frame_size, sliding with a step of
    hop_size samples. The energy of a frame is the sum of squares of its samples.

    Args:
        audio (np.ndarray): 1D audio sequence
        frame_size (int): frame length, in samples
        hop_size (int): hop length, in samples

    Returns:
        np.ndarray: energy array, one value per frame
    """
    num_frames = int(np.ceil((len(audio) - frame_size) / hop_size)) + 1
    energy = np.zeros(num_frames)
    for i in range(num_frames):
        start = i * hop_size
        end = min(start + frame_size, len(audio))
        frame = audio[start:end]
        energy[i] = np.sum(frame ** 2)
    return energy
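A quick sanity check of the function above, with hypothetical values that are not part of the original script: a 16 kHz signal whose second half is a 440 Hz tone should produce near-zero energy in the early frames and roughly constant energy in the later ones.

sr = 16000
t = np.arange(sr) / sr                               # 1 second of audio
audio = np.concatenate([np.zeros(sr // 2),           # first half: silence
                        0.5 * np.sin(2 * np.pi * 440 * t[: sr // 2])])  # second half: tone

frame_size = int(0.02 * sr)  # 320 samples (20 ms)
hop_size = int(0.01 * sr)    # 160 samples (10 ms)
energy = compute_short_time_energy(audio, frame_size, hop_size)

print(energy[:5])    # ~0 for the silent frames
print(energy[-5:])   # ~40 per full tone frame (0.5^2 * 0.5 * 320 ≈ 40)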
- Dominant-source extraction
def segment_audio(audio, sr, energy, threshold, frame_size, hop_size):
    """Locate the dominant-source regions of an audio signal using short-time energy.

    Args:
        audio (np.ndarray): 1D audio sequence
        sr (int): sample rate
        energy (np.ndarray): short-time energy per frame
        threshold (float): energy threshold; frames below it are treated as low amplitude
        frame_size (int): frame length, in samples
        hop_size (int): hop length, in samples

    Returns:
        list: tuples of (start time, end time, audio segment, mean short-time energy)
    """
    segments = []
    acc_energy = []
    in_segment = False
    start_frame = None
    last_active_frame = None

    # Scan the per-frame energies and detect contiguous active regions
    for i, e in enumerate(energy):
        if e >= threshold:
            # Frame reaches the threshold; open a new segment if not already in one
            if not in_segment:
                in_segment = True
                start_frame = i
            last_active_frame = i  # remember the last frame that reached the threshold
            acc_energy.append(e)
        else:
            # Frame is below the threshold; if we were inside a segment, close it
            if in_segment:
                start_sample = start_frame * hop_size
                # End at the end of the last frame that reached the threshold
                end_sample = min(last_active_frame * hop_size + frame_size, len(audio))
                segments.append((start_sample / sr, end_sample / sr,
                                 audio[start_sample:end_sample], np.mean(acc_energy)))
                in_segment = False
                start_frame = None
                last_active_frame = None
                acc_energy = []  # reset the accumulator for the next segment

    # If the audio ends while still inside a segment, close the final segment
    if in_segment:
        start_sample = start_frame * hop_size
        end_sample = len(audio)
        segments.append((start_sample / sr, end_sample / sr,
                         audio[start_sample:end_sample], np.mean(acc_energy)))
    return segments


def save_segments(segments, sr, output_dir="segments"):
    """Merge all detected dominant-source segments and save them as a single file.

    Args:
        segments (list): segmentation result, tuples of (start time, end time, audio segment, mean energy)
        sr (int): sample rate
        output_dir (str, optional): output directory (defaults to "segments")
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    merged_audio_list = [seg[2] for seg in segments]
    # Concatenate all segments and save them as one file
    if merged_audio_list:
        merged_audio = np.concatenate(merged_audio_list)
        merged_file = os.path.join(output_dir, "merged_segments.wav")
        sf.write(merged_file, merged_audio, sr)
        print(f"Saved merged file: {merged_file}")
# audio_file = "/home/wangguisen/projects/voice_separation/output/單軌音頻/普通單軌音頻_5_spk0.wav"
audio_file = "/home/wangguisen/projects/voice_separation/2-.wav"
audio, sr = sf.read(audio_file)

# Parameter setup: e.g. 20 ms frame length, 10 ms hop
frame_size = int(0.02 * sr)  # number of samples in 20 ms
hop_size = int(0.01 * sr)    # number of samples in 10 ms
print("[DEBUG] frame_size: ", frame_size)
print("[DEBUG] hop_size: ", hop_size)

# Energy threshold (tune for the material); frames below it are treated as low amplitude
threshold = 0.001

# Compute the short-time energy
energy = compute_short_time_energy(audio, frame_size, hop_size)

# Locate the dominant-source regions from the short-time energy
segments = segment_audio(audio, sr, energy, threshold, frame_size, hop_size)

# Report the detected regions
print("Detected dominant-source regions:")
for seg in segments:
    start, end, _, e = seg
    print(f"start: {start:.3f} s, end: {end:.3f} s, mean short-time energy: {e:.3f}")

# Merge the detected segments and save them to disk
save_segments(segments, sr, output_dir="segments")
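The hard-coded threshold of 0.001 is level-dependent, so it must be retuned whenever recording gain changes. One alternative is to derive the threshold from the energy distribution itself; the sketch below is an assumption about how that could look, not part of the original pipeline, and the percentile and margin factor are illustrative values.

# Hypothetical adaptive alternative to the fixed threshold = 0.001:
# treat the quietest frames as background noise and place the threshold above that floor.
noise_floor = np.percentile(energy, 20)          # energy of the quietest 20% of frames
adaptive_threshold = max(noise_floor * 5, 1e-6)  # margin above the floor; tune per dataset
print("[DEBUG] adaptive_threshold:", adaptive_threshold)

segments = segment_audio(audio, sr, energy, adaptive_threshold, frame_size, hop_size)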