rphrp1985 committed on
Commit
689cd53
·
verified ·
1 Parent(s): 54122f4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -22
app.py CHANGED
@@ -11,7 +11,7 @@ ckpt = "meta-llama/Llama-3.2-11B-Vision-Instruct"
11
  model = MllamaForConditionalGeneration.from_pretrained(ckpt,
12
  torch_dtype=torch.bfloat16).to("cuda")
13
  processor = AutoProcessor.from_pretrained(ckpt)
14
-
15
 
16
  @spaces.GPU
17
  def bot_streaming(message, history, max_new_tokens=250):
@@ -25,31 +25,39 @@ def bot_streaming(message, history, max_new_tokens=250):
25
  images = []
26
 
27
 
28
- for i, msg in enumerate(history):
29
- if isinstance(msg[0], tuple):
30
- messages.append({"role": "user", "content": [{"type": "text", "text": history[i+1][0]}, {"type": "image"}]})
31
- messages.append({"role": "assistant", "content": [{"type": "text", "text": history[i+1][1]}]})
32
- images.append(Image.open(msg[0][0]).convert("RGB"))
33
- elif isinstance(history[i-1], tuple) and isinstance(msg[0], str):
34
- # messages are already handled
35
- pass
36
- elif isinstance(history[i-1][0], str) and isinstance(msg[0], str): # text only turn
37
- messages.append({"role": "user", "content": [{"type": "text", "text": msg[0]}]})
38
- messages.append({"role": "assistant", "content": [{"type": "text", "text": msg[1]}]})
39
 
40
- # add current message
41
- if len(message["files"]) == 1:
42
 
43
- if isinstance(message["files"][0], str): # examples
44
- image = Image.open(message["files"][0]).convert("RGB")
45
- else: # regular input
46
- image = Image.open(message["files"][0]["path"]).convert("RGB")
47
- images.append(image)
48
- messages.append({"role": "user", "content": [{"type": "text", "text": txt}, {"type": "image"}]})
49
- else:
50
- messages.append({"role": "user", "content": [{"type": "text", "text": txt}]})
51
 
52
 
 
 
 
 
 
 
 
 
53
  print("\n\nfinal messages ", messages)
54
 
55
  texts = processor.apply_chat_template(messages, add_generation_prompt=True)
 
11
  model = MllamaForConditionalGeneration.from_pretrained(ckpt,
12
  torch_dtype=torch.bfloat16).to("cuda")
13
  processor = AutoProcessor.from_pretrained(ckpt)
14
+ import requests
15
 
16
  @spaces.GPU
17
  def bot_streaming(message, history, max_new_tokens=250):
 
25
  images = []
26
 
27
 
28
+ # for i, msg in enumerate(history):
29
+ # if isinstance(msg[0], tuple):
30
+ # messages.append({"role": "user", "content": [{"type": "text", "text": history[i+1][0]}, {"type": "image"}]})
31
+ # messages.append({"role": "assistant", "content": [{"type": "text", "text": history[i+1][1]}]})
32
+ # images.append(Image.open(msg[0][0]).convert("RGB"))
33
+ # elif isinstance(history[i-1], tuple) and isinstance(msg[0], str):
34
+ # # messages are already handled
35
+ # pass
36
+ # elif isinstance(history[i-1][0], str) and isinstance(msg[0], str): # text only turn
37
+ # messages.append({"role": "user", "content": [{"type": "text", "text": msg[0]}]})
38
+ # messages.append({"role": "assistant", "content": [{"type": "text", "text": msg[1]}]})
39
 
40
+ # # add current message
41
+ # if len(message["files"]) == 1:
42
 
43
+ # if isinstance(message["files"][0], str): # examples
44
+ # image = Image.open(message["files"][0]).convert("RGB")
45
+ # else: # regular input
46
+ # image = Image.open(message["files"][0]["path"]).convert("RGB")
47
+ # images.append(image)
48
+ # messages.append({"role": "user", "content": [{"type": "text", "text": txt}, {"type": "image"}]})
49
+ # else:
50
+ # messages.append({"role": "user", "content": [{"type": "text", "text": txt}]})
51
 
52
 
53
+ messages= message['messages']
54
+ files = message['images']
55
+ for url in files:
56
+ response = requests.get(url)
57
+ img = Image.open(BytesIO(response.content)).convert("RGB")
58
+ images.append(img)
59
+
60
+
61
  print("\n\nfinal messages ", messages)
62
 
63
  texts = processor.apply_chat_template(messages, add_generation_prompt=True)