Spaces: Running on Zero
File size: 2,931 Bytes
import gradio as gr
from gender_classification import gender_classification
from emotion_classification import emotion_classification
from dog_breed import dog_breed_classification
from deepfake_vs_real import deepfake_classification
from gym_workout_classification import gym_workout_classification
# Functions to update the model state when a button is clicked.
def select_gender():
    return "gender"

def select_emotion():
    return "emotion"

def select_dog_breed():
    return "dog breed"

def select_deepfake():
    return "deepfake"

def select_gym_workout():
    return "gym workout"
# Main classification function that calls the appropriate model based on selection.
def classify(image, model_name):
    if model_name == "gender":
        return gender_classification(image)
    elif model_name == "emotion":
        return emotion_classification(image)
    elif model_name == "dog breed":
        return dog_breed_classification(image)
    elif model_name == "deepfake":
        return deepfake_classification(image)
    elif model_name == "gym workout":
        return gym_workout_classification(image)
    else:
        return {"Error": "No model selected"}
with gr.Blocks() as demo:
    # Sidebar with title and model selection buttons.
    with gr.Sidebar():
        gr.Markdown("# SigLIP2 Classification")
        with gr.Row():
            gender_btn = gr.Button("Gender Classification")
            emotion_btn = gr.Button("Emotion Classification")
            dog_breed_btn = gr.Button("Dog Breed Classification")
            deepfake_btn = gr.Button("Deepfake vs Real")
            gym_workout_btn = gr.Button("Gym Workout Classification")

        # State to hold the current model choice.
        selected_model = gr.State("gender")

        # Set model state when buttons are clicked.
        gender_btn.click(fn=select_gender, inputs=[], outputs=selected_model)
        emotion_btn.click(fn=select_emotion, inputs=[], outputs=selected_model)
        dog_breed_btn.click(fn=select_dog_breed, inputs=[], outputs=selected_model)
        deepfake_btn.click(fn=select_deepfake, inputs=[], outputs=selected_model)
        gym_workout_btn.click(fn=select_gym_workout, inputs=[], outputs=selected_model)

        gr.Markdown("### Current Model:")
        model_display = gr.Textbox(value="gender", interactive=False)
        # Update display when state changes.
        selected_model.change(lambda m: m, selected_model, model_display)

    # Main interface: image input, analyze button, and prediction output.
    with gr.Column():
        image_input = gr.Image(type="numpy", label="Upload Image")
        analyze_btn = gr.Button("Analyze")
        output_label = gr.Label(label="Prediction Scores")

        # When the "Analyze" button is clicked, use the selected model to classify the image.
        analyze_btn.click(fn=classify, inputs=[image_input, selected_model], outputs=output_label)

demo.launch()
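
The five imported modules are not shown on this page. Each is expected to expose a function that accepts the uploaded image (a NumPy array, since the input component uses type="numpy") and returns a dict of label-to-confidence scores that gr.Label can render. Below is a minimal sketch of what one such module, e.g. gender_classification.py, might look like, assuming a fine-tuned SigLIP2 image-classification checkpoint served through the transformers pipeline; the model ID is a placeholder, not confirmed by this Space.

# gender_classification.py -- hypothetical sketch; the actual module is not part of this file.
import numpy as np
from PIL import Image
from transformers import pipeline

# Placeholder model ID: substitute the SigLIP2 checkpoint the Space actually uses.
_classifier = pipeline("image-classification", model="your-org/siglip2-gender-classifier")

def gender_classification(image: np.ndarray) -> dict:
    """Classify a NumPy image and return {label: score} for gr.Label."""
    pil_image = Image.fromarray(np.uint8(image)).convert("RGB")
    results = _classifier(pil_image)  # list of {"label": ..., "score": ...}
    return {r["label"]: float(r["score"]) for r in results}

The other four modules would follow the same pattern, differing only in the checkpoint they load.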