Yeluo0204 committed on
Commit
148f5fc
·
verified ·
1 Parent(s): a4b9b41

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -37
app.py CHANGED
@@ -5,9 +5,6 @@ from scipy.io import wavfile
5
  import gradio as gr
6
  from inference import EnsembleDemucsMDXMusicSeparationModel, predict_with_model
7
  import torch
8
- import librosa
9
- import librosa.display
10
- import matplotlib.pyplot as plt
11
  import time
12
 
13
  # 检查文件是否准备好
@@ -24,28 +21,6 @@ def check_file_readiness(filepath):
24
  time.sleep(0.5)
25
  return True
26
 
27
- # 生成音频的频谱图
28
- def generate_spectrogram(audio_file_path):
29
- y, sr = librosa.load(audio_file_path)
30
- plt.figure(figsize=(10, 4))
31
- S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128, fmax=8000)
32
- librosa.display.specshow(librosa.power_to_db(S, ref=np.max),
33
- y_axis='mel', fmax=8000, x_axis='time')
34
- plt.colorbar(format='%+2.0f dB')
35
- plt.title('Mel spectrogram')
36
- plt.tight_layout()
37
- image_path = tempfile.mktemp('.png')
38
- plt.savefig(image_path)
39
- plt.close()
40
- return image_path
41
-
42
- # 为多个音频文件生成频谱图
43
- def generate_spectrograms(audio_files):
44
- output_spectrograms = []
45
- for audio_file in audio_files:
46
- output_spectrograms.append(generate_spectrogram(audio_file))
47
- return tuple(output_spectrograms)
48
-
49
  # 音乐分离的包装函数
50
  def separate_music_file_wrapper(uploaded_files, use_cpu, use_single_onnx, large_overlap, small_overlap, chunk_size, use_large_gpu):
51
  input_files = [uploaded_files.name] # 获取上传文件的路径
@@ -96,14 +71,10 @@ def separate_music_file_wrapper(uploaded_files, use_cpu, use_single_onnx, large_
96
  wavfile.write(empty_file, 44100, empty_data.astype(np.int16)) # 转换为int16,因为wavfile不支持float32
97
  output_files_ready.append(empty_file)
98
 
99
- # 在分离音频后立即生成频谱图
100
- output_spectrograms = generate_spectrograms(output_files_ready)
101
-
102
  print(len(output_files_ready)) # 应输出6
103
- print(len(output_spectrograms)) # 应输出6
104
 
105
  print("返回前")
106
- return tuple(output_files_ready) + output_spectrograms
107
  print("返回后")
108
 
109
  description = """
@@ -134,22 +105,16 @@ with gr.Blocks(theme=theme) as demo:
134
  process_button = gr.Button("处理音频")
135
 
136
  vocals = gr.Audio(label="人声")
137
- vocals_spectrogram = gr.Image(label="人声频谱图")
138
  instrumental = gr.Audio(label="伴奏")
139
- instrumental_spectrogram = gr.Image(label="伴奏频谱图")
140
  instrumental2 = gr.Audio(label="伴奏2")
141
- instrumental2_spectrogram = gr.Image(label="伴奏2频谱图")
142
  bass = gr.Audio(label="贝斯")
143
- bass_spectrogram = gr.Image(label="贝斯频谱图")
144
  drums = gr.Audio(label="鼓声")
145
- drums_spectrogram = gr.Image(label="鼓声频谱图")
146
  other = gr.Audio(label="其他")
147
- other_spectrogram = gr.Image(label="其他频谱图")
148
 
149
  process_button.click(
150
  separate_music_file_wrapper,
151
  inputs=[uploaded_file, use_cpu, use_single_onnx, large_overlap, small_overlap, chunk_size, use_large_gpu],
152
- outputs=[vocals, instrumental, instrumental2, bass, drums, other, vocals_spectrogram, instrumental_spectrogram, instrumental2_spectrogram, bass_spectrogram, drums_spectrogram, other_spectrogram],
153
  )
154
 
155
  demo.queue().launch(debug=True, share=False)
 
5
  import gradio as gr
6
  from inference import EnsembleDemucsMDXMusicSeparationModel, predict_with_model
7
  import torch
 
 
 
8
  import time
9
 
10
  # 检查文件是否准备好
 
21
  time.sleep(0.5)
22
  return True
23
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
  # 音乐分离的包装函数
25
  def separate_music_file_wrapper(uploaded_files, use_cpu, use_single_onnx, large_overlap, small_overlap, chunk_size, use_large_gpu):
26
  input_files = [uploaded_files.name] # 获取上传文件的路径
 
71
  wavfile.write(empty_file, 44100, empty_data.astype(np.int16)) # 转换为int16,因为wavfile不支持float32
72
  output_files_ready.append(empty_file)
73
 
 
 
 
74
  print(len(output_files_ready)) # 应输出6
 
75
 
76
  print("返回前")
77
+ return tuple(output_files_ready)
78
  print("返回后")
79
 
80
  description = """
 
105
  process_button = gr.Button("处理音频")
106
 
107
  vocals = gr.Audio(label="人声")
 
108
  instrumental = gr.Audio(label="伴奏")
 
109
  instrumental2 = gr.Audio(label="伴奏2")
 
110
  bass = gr.Audio(label="贝斯")
 
111
  drums = gr.Audio(label="鼓声")
 
112
  other = gr.Audio(label="其他")
 
113
 
114
  process_button.click(
115
  separate_music_file_wrapper,
116
  inputs=[uploaded_file, use_cpu, use_single_onnx, large_overlap, small_overlap, chunk_size, use_large_gpu],
117
+ outputs=[vocals, instrumental, instrumental2, bass, drums, other],
118
  )
119
 
120
  demo.queue().launch(debug=True, share=False)