# voice-match/app.py
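"""Voice-match: record or upload a clip and retrieve the most similar-sounding
clips from the Common Voice English dataset, using embedding search over a
FAISS index."""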
import gradio as gr
import numpy as np
import pandas as pd
from huggingface_hub import hf_hub_download

from audio_index import AudioEmbeddingSystem
from search import search
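
# Fetch the prebuilt embedding database and FAISS index from the
# companion dataset repo.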
db_file = hf_hub_download(
    repo_id="freddyaboulton/common-voice-english-audio",
    filename="audio_db.sqlite",
    repo_type="dataset",
)
index_file = hf_hub_download(
    repo_id="freddyaboulton/common-voice-english-audio",
    filename="audio_faiss.index",
    repo_type="dataset",
)
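# AudioEmbeddingSystem wraps the downloaded SQLite database and FAISS index.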
audio_embedding_system = AudioEmbeddingSystem(db_path=db_file, index_path=index_file)


def audio_search(audio_tuple):
    """Embed the recorded clip and return the closest matches from the index."""
    sample_rate, array = audio_tuple
    # Gradio delivers recorded audio as int16 PCM; normalize to float32 in [-1, 1).
    if array.dtype == np.int16:
        array = array.astype(np.float32) / 32768.0
    rows = audio_embedding_system.search((sample_rate, array))
    orig_rows = search(rows)
    # Join the index hits with the dataset rows to attach each match's
    # transcript and a playable <audio> element.
    for row in rows:
        path = row["path"]
        for orig in orig_rows:
            orig_row = orig["row"]
            if orig_row["path"] == path:
                row["sentence"] = orig_row["sentence"]
                # Quote the src URL and store a plain string so the "html"
                # dataframe column renders a working audio player.
                row["audio"] = f'<audio src="{orig_row["audio"][0]["src"]}" controls />'
    return (
        pd.DataFrame(rows)[["path", "audio", "sentence", "distance"]]
        .sort_values(by="distance", ascending=True)
    )
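

# Record or upload audio in, a table of nearest clips out; the "html" datatype
# renders the audio column as embedded players.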
demo = gr.Interface(
    fn=audio_search,
    inputs=gr.Audio(
        label="Record or upload a clip of your voice",
        sources=["microphone", "upload"],
    ),
    outputs=gr.Dataframe(
        headers=["path", "audio", "sentence", "distance"],
        datatype=["str", "html", "str", "number"],
    ),
)
demo.launch()