AustingDong committed
Commit a25a8bd · 1 Parent(s): 789cbe1

change llava model

Files changed (2)
  1. app.py +1 -1
  2. demo/model_utils.py +5 -6
app.py CHANGED
@@ -286,7 +286,7 @@ with gr.Blocks() as demo:
             activation_map_output = gr.Gallery(label="activation Map", height=300, columns=1)
 
         with gr.Column():
-            model_selector = gr.Dropdown(choices=["Clip", "ChartGemma-3B", "Janus-1B", "Janus-7B", "LLaVA-v1.6-Mistral-7B"], value="Clip", label="model")
+            model_selector = gr.Dropdown(choices=["Clip", "ChartGemma-3B", "Janus-1B", "Janus-7B", "LLaVA-v1.6-7B"], value="Clip", label="model")
             response_type = gr.Dropdown(choices=["Visualization only"], value="Visualization only", label="response_type")
             focus = gr.Dropdown(choices=["Visual Encoder"], value="Visual Encoder", label="focus")
             activation_map_method = gr.Dropdown(choices=["GradCAM"], value="GradCAM", label="activation map type")
demo/model_utils.py CHANGED
@@ -120,7 +120,7 @@ class LLaVA_Utils(Model_Utils):
     def init_LLaVA(self):
 
         # model_path = "llava-hf/llava-1.5-7b-hf"
-        model_path = "llava-hf/llava-v1.6-mistral-7b-hf"
+        model_path = "llava-hf/llava-v1.6-vicuna-7b-hf"
         config = AutoConfig.from_pretrained(model_path)
 
         self.vl_gpt = LlavaNextForConditionalGeneration.from_pretrained(model_path,
@@ -138,11 +138,10 @@ class LLaVA_Utils(Model_Utils):
     def prepare_inputs(self, question, image):
         conversation = [
             {
-
-                "role": "user",
-                "content": [
-                    {"type": "text", "text": question},
-                    {"type": "image"},
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": question},
+                    {"type": "image"},
                 ],
             },
         ]
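For context (not part of this commit): below is a minimal sketch of how the new checkpoint would typically be loaded and prompted with the transformers LlavaNext API, mirroring the init_LLaVA and prepare_inputs changes above. The dtype, the example question, and the image handling are illustrative assumptions, not code from this repository.

# Sketch only: loads the checkpoint introduced by this commit and builds a prompt
# using the same conversation structure as prepare_inputs.
import torch
from transformers import AutoProcessor, LlavaNextForConditionalGeneration

model_path = "llava-hf/llava-v1.6-vicuna-7b-hf"  # path set in init_LLaVA above

processor = AutoProcessor.from_pretrained(model_path)
model = LlavaNextForConditionalGeneration.from_pretrained(
    model_path,
    torch_dtype=torch.float16,  # assumption: half precision to fit a single GPU
)

# Same chat structure as prepare_inputs: one user turn with a text and an image slot.
conversation = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe the chart."},  # illustrative question
            {"type": "image"},
        ],
    },
]
prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)

# With a PIL image in hand, inputs would then be built and generated from, e.g.:
# inputs = processor(images=image, text=prompt, return_tensors="pt").to(model.device)
# output_ids = model.generate(**inputs, max_new_tokens=64)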