Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -42,9 +42,9 @@ def redistribute_codes(row):
 snac_model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz").to("cuda")

 # Load the single-speaker language model
-tokenizer = AutoTokenizer.from_pretrained('Llama-3B-Mono-Jim')
+tokenizer = AutoTokenizer.from_pretrained('prithivMLmods/Llama-3B-Mono-Jim')
 model = AutoModelForCausalLM.from_pretrained(
-    'Llama-3B-Mono-Jim', torch_dtype=torch.bfloat16
+    'prithivMLmods/Llama-3B-Mono-Jim', torch_dtype=torch.bfloat16
 ).cuda()

 @spaces.GPU

@@ -76,7 +76,7 @@ def generate_audio(text, temperature, top_p, max_new_tokens):
 # Gradio Interface
 with gr.Blocks() as demo:
     gr.Markdown("# Llama-3B-Mono-Jim - Single Speaker Audio Generation")
-    gr.Markdown("Generate speech audio using the `Llama-3B-Mono-Jim` model.")
+    gr.Markdown("Generate speech audio using the `prithivMLmods/Llama-3B-Mono-Jim` model.")

     with gr.Row():
         text_input = gr.Textbox(lines=4, label="Input Text")