prithivMLmods committed on
Commit
0939ab6
·
verified ·
1 Parent(s): f1f800c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -5
app.py CHANGED
@@ -17,6 +17,7 @@ from bird_species import bird_classification
17
  from alphabet_sign_language_detection import sign_language_classification
18
  from rice_leaf_disease import classify_leaf_disease
19
  from traffic_density import traffic_density_classification
 
20
 
21
  # Main classification function for multi-model classification.
22
  def classify(image, model_name):
@@ -48,6 +49,8 @@ def classify(image, model_name):
48
  return sign_language_classification(image)
49
  elif model_name == "traffic density":
50
  return traffic_density_classification(image)
 
 
51
  else:
52
  return {"Error": "No model selected"}
53
 
@@ -57,7 +60,7 @@ def select_model(model_name):
57
  "gender": "secondary", "emotion": "secondary", "dog breed": "secondary", "deepfake": "secondary",
58
  "gym workout": "secondary", "waste": "secondary", "age": "secondary", "mnist": "secondary",
59
  "fashion_mnist": "secondary", "food": "secondary", "bird": "secondary", "leaf disease": "secondary",
60
- "sign language": "secondary", "traffic density": "secondary"
61
  }
62
  model_variants[model_name] = "primary"
63
  return (model_name, *(gr.update(variant=model_variants[key]) for key in model_variants))
@@ -101,9 +104,9 @@ def infer(image, candidate_labels):
101
  sg1_probs, sg2_probs = siglip_detector(image, candidate_labels)
102
  return postprocess_siglip(sg1_probs, sg2_probs, labels=candidate_labels)
103
 
104
- # Build the Gradio Interface with two tab
105
  with gr.Blocks() as demo:
106
- gr.Markdown("# Multi-Model & Zero-Shot Classification Interface")
107
 
108
  with gr.Tabs():
109
  # Tab 1: Multi-Model Classification
@@ -125,14 +128,23 @@ with gr.Blocks() as demo:
125
  leaf_disease_btn = gr.Button("Rice Leaf Disease", variant="secondary")
126
  sign_language_btn = gr.Button("Alphabet Sign Language", variant="secondary")
127
  traffic_density_btn = gr.Button("Traffic Density", variant="secondary")
 
128
 
129
  selected_model = gr.State("age")
130
  gr.Markdown("### Current Model:")
131
  model_display = gr.Textbox(value="age", interactive=False)
132
  selected_model.change(lambda m: m, selected_model, model_display)
133
 
134
- buttons = [gender_btn, emotion_btn, dog_breed_btn, deepfake_btn, gym_workout_btn, waste_btn, age_btn, mnist_btn, fashion_mnist_btn, food_btn, bird_btn, leaf_disease_btn, sign_language_btn, traffic_density_btn]
135
- model_names = ["gender", "emotion", "dog breed", "deepfake", "gym workout", "waste", "age", "mnist", "fashion_mnist", "food", "bird", "leaf disease", "sign language", "traffic density"]
 
 
 
 
 
 
 
 
136
 
137
  for btn, name in zip(buttons, model_names):
138
  btn.click(fn=lambda n=name: select_model(n), inputs=[], outputs=[selected_model] + buttons)
 
17
  from alphabet_sign_language_detection import sign_language_classification
18
  from rice_leaf_disease import classify_leaf_disease
19
  from traffic_density import traffic_density_classification
20
+ from clip_art import clipart_classification # New import
21
 
22
  # Main classification function for multi-model classification.
23
  def classify(image, model_name):
 
49
  return sign_language_classification(image)
50
  elif model_name == "traffic density":
51
  return traffic_density_classification(image)
52
+ elif model_name == "clip art": # New option
53
+ return clipart_classification(image)
54
  else:
55
  return {"Error": "No model selected"}
56
 
 
60
  "gender": "secondary", "emotion": "secondary", "dog breed": "secondary", "deepfake": "secondary",
61
  "gym workout": "secondary", "waste": "secondary", "age": "secondary", "mnist": "secondary",
62
  "fashion_mnist": "secondary", "food": "secondary", "bird": "secondary", "leaf disease": "secondary",
63
+ "sign language": "secondary", "traffic density": "secondary", "clip art": "secondary" # New model variant
64
  }
65
  model_variants[model_name] = "primary"
66
  return (model_name, *(gr.update(variant=model_variants[key]) for key in model_variants))
 
104
  sg1_probs, sg2_probs = siglip_detector(image, candidate_labels)
105
  return postprocess_siglip(sg1_probs, sg2_probs, labels=candidate_labels)
106
 
107
+ # Build the Gradio Interface with two tabs.
108
  with gr.Blocks() as demo:
109
+ gr.Markdown("# Multi-Domain & Zero-Shot Image Classification")
110
 
111
  with gr.Tabs():
112
  # Tab 1: Multi-Model Classification
 
128
  leaf_disease_btn = gr.Button("Rice Leaf Disease", variant="secondary")
129
  sign_language_btn = gr.Button("Alphabet Sign Language", variant="secondary")
130
  traffic_density_btn = gr.Button("Traffic Density", variant="secondary")
131
+ clip_art_btn = gr.Button("Art Classification", variant="secondary") # New button
132
 
133
  selected_model = gr.State("age")
134
  gr.Markdown("### Current Model:")
135
  model_display = gr.Textbox(value="age", interactive=False)
136
  selected_model.change(lambda m: m, selected_model, model_display)
137
 
138
+ buttons = [
139
+ gender_btn, emotion_btn, dog_breed_btn, deepfake_btn, gym_workout_btn, waste_btn,
140
+ age_btn, mnist_btn, fashion_mnist_btn, food_btn, bird_btn, leaf_disease_btn,
141
+ sign_language_btn, traffic_density_btn, clip_art_btn # Include new button
142
+ ]
143
+ model_names = [
144
+ "gender", "emotion", "dog breed", "deepfake", "gym workout", "waste",
145
+ "age", "mnist", "fashion_mnist", "food", "bird", "leaf disease",
146
+ "sign language", "traffic density", "clip art" # New model name
147
+ ]
148
 
149
  for btn, name in zip(buttons, model_names):
150
  btn.click(fn=lambda n=name: select_model(n), inputs=[], outputs=[selected_model] + buttons)