Lambeckkk committed
Commit ada9672 · verified · 1 Parent(s): 6bc45b0

Update app.py

Files changed (1):
  app.py +12 -24
app.py CHANGED
@@ -1,29 +1,17 @@
+ from diffusers import DiffusionPipeline
  import gradio as gr
- import torchaudio
- import torch
- from transformers import AutoProcessor, MusicgenForConditionalGeneration

- # Load MusicGen model
- model_name = "facebook/musicgen-small"
- model = MusicgenForConditionalGeneration.from_pretrained(model_name)
- processor = AutoProcessor.from_pretrained(model_name)
- model.to("cuda" if torch.cuda.is_available() else "cpu")
+ pipe = DiffusionPipeline.from_pretrained("riffusion/riffusion-model-v1")
+ pipe.to("cpu")  # Safe for any system

- def generate_music(description):
-     inputs = processor(text=[description], return_tensors="pt")
-     inputs = {k: v.to(model.device) for k, v in inputs.items()}
-     audio_values = model.generate(**inputs, max_new_tokens=256)
-     audio = processor.decode(audio_values[0], sampling_rate=16000)
-     torchaudio.save("output.wav", audio.unsqueeze(0), 16000)
-     return "output.wav"
+ def generate_music(prompt):
+     output = pipe(prompt)
+     audio = output["audio"]
+     return audio

- # Gradio UI
- demo = gr.Interface(
+ gr.Interface(
      fn=generate_music,
-     inputs=gr.Textbox(label="Describe your song"),
-     outputs=gr.Audio(label="Generated Track"),
-     title="LarynxLab MVP",
-     description="Type a music idea and get a short AI instrumental."
- )
-
- demo.launch()
+     inputs=gr.Textbox(label="Describe your music"),
+     outputs=gr.Audio(label="AI Song"),
+     title="LarynxLab - Riffusion Edition"
+ ).launch()
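
Note on the new code: loading riffusion/riffusion-model-v1 through DiffusionPipeline gives a Stable Diffusion pipeline whose output is a spectrogram image (available as output.images), not raw audio, so output["audio"] will almost certainly fail at runtime and gr.Audio would never receive a waveform. Below is a minimal sketch of what generate_music might look like with an explicit image-to-audio step, assuming the riffusion package supplies the spectrogram converter; the module paths, class names, and converter method used here are assumptions to verify against that package, not part of diffusers.

import numpy as np
from diffusers import DiffusionPipeline
# Assumed imports from the riffusion project (pip install riffusion); verify
# the exact module paths and class names against that package.
from riffusion.spectrogram_image_converter import SpectrogramImageConverter
from riffusion.spectrogram_params import SpectrogramParams

pipe = DiffusionPipeline.from_pretrained("riffusion/riffusion-model-v1")
pipe.to("cpu")

converter = SpectrogramImageConverter(params=SpectrogramParams())

def generate_music(prompt):
    # The diffusion pipeline produces spectrogram images, not audio.
    image = pipe(prompt).images[0]
    # Assumed converter API: turns the spectrogram image into a pydub AudioSegment.
    segment = converter.audio_from_spectrogram_image(image)
    samples = np.array(segment.get_array_of_samples()).astype(np.float32)
    samples /= 32768.0  # assumes 16-bit samples; scale to roughly [-1, 1]
    return (segment.frame_rate, samples)  # (sample_rate, waveform) tuple that gr.Audio accepts

Also worth noting: pipe.to("cpu") keeps the app from crashing on GPU-less hardware, but a diffusion model on CPU will be very slow per request; the device check the removed MusicGen code used ("cuda" if torch.cuda.is_available() else "cpu") is the more common pattern.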