rphrp1985 committed
Commit 8625475 · verified · 1 parent: de8c49a

Update app.py

Files changed (1): app.py (+59, −25)
app.py CHANGED
@@ -41,38 +41,72 @@ def url_to_base64(image_url):
     else:
         return ''
 
+import json
 
 @spaces.GPU(duration=90)
 def run_inference(message, history):
-    ## may work
-    messages = []
-    images = []
-    print('\n\nmessage ', message)
-    print('\n\nhistory ', history)
-
-
-    for couple in history:
-        if type(couple[0]) is tuple:
-            images += couple[0]
-        elif couple[0][1]:
-            messages.append(UserMessage(content=[ImageURLChunk(image_url=image_to_base64(path)) for path in images] + [TextChunk(text=couple[0][1])]))
-            messages.append(AssistantMessage(content=couple[1]))
-            images = []
-    ##
-
-    messages.append(UserMessage(content=[ImageURLChunk(image_url=image_to_base64(file["path"])) for file in message["files"]] + [TextChunk(text=message["text"])]))
-
-    print('\n\nfinal messages', messages)
-    completion_request = ChatCompletionRequest(messages=messages)
-
-    encoded = tokenizer.encode_chat_completion(completion_request)
-
-    images = encoded.images
-    tokens = encoded.tokens
-
-    out_tokens, _ = generate([tokens], model, images=[images], max_tokens=512, temperature=0.45, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id)
-    result = tokenizer.decode(out_tokens[0])
-    return result
+
+    try:
+        messages = message['text']
+        print("messages ", messages)
+        messages = json.loads(messages)
+        final_msg = []
+        for x in messages:
+            if x['role'] == 'user':
+                tmmp = []
+                for y in x['content']:
+                    if y['type'] == 'image':
+                        tmmp += [ImageURLChunk(image_url=url_to_base64(y['url']))]
+                    else:
+                        tmmp += [TextChunk(text=y['text'])]
+                final_msg.append(UserMessage(content=tmmp))
+            else:
+                final_msg.append(AssistantMessage(content=x['content'][0]['text']))
+
+        completion_request = ChatCompletionRequest(messages=final_msg)
+
+        encoded = tokenizer.encode_chat_completion(completion_request)
+
+        images = encoded.images
+        tokens = encoded.tokens
+
+        out_tokens, _ = generate([tokens], model, images=[images], max_tokens=512, temperature=0.45, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id)
+        result = tokenizer.decode(out_tokens[0])
+        return result
+
+    ## may work
+
+    except Exception as e:
+        messages = []
+        images = []
+        print('\n\nmessage ', message)
+        print('\n\nhistory ', history)
+
+        for couple in history:
+            if type(couple[0]) is tuple:
+                images += couple[0]
+            elif couple[0][1]:
+                messages.append(UserMessage(content=[ImageURLChunk(image_url=image_to_base64(path)) for path in images] + [TextChunk(text=couple[0][1])]))
+                messages.append(AssistantMessage(content=couple[1]))
+                images = []
+        ##
+
+        messages.append(UserMessage(content=[ImageURLChunk(image_url=image_to_base64(file["path"])) for file in message["files"]] + [TextChunk(text=message["text"])]))
+
+        print('\n\nfinal messages', messages)
+        completion_request = ChatCompletionRequest(messages=messages)
+
+        encoded = tokenizer.encode_chat_completion(completion_request)
+
+        images = encoded.images
+        tokens = encoded.tokens
+
+        out_tokens, _ = generate([tokens], model, images=[images], max_tokens=512, temperature=0.45, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id)
+        result = tokenizer.decode(out_tokens[0])
+        return result
 
 demo = gr.ChatInterface(fn=run_inference, title="Pixtral 12B", multimodal=True, description="A demo chat interface with Pixtral 12B, deployed using Mistral Inference.")
 demo.queue().launch()
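
Note: for reference, here is a sketch of the payload the new try-branch expects. This is inferred from the parsing code above rather than from any accompanying documentation: the Gradio text field carries a JSON-encoded message list, where each user message holds "image"/"text" content chunks and each assistant message holds a single text chunk. The variable names and example URL below are illustrative only.

import json

# Hypothetical client payload, mirroring the keys run_inference reads:
# 'role', 'content', 'type', 'url', 'text'.
payload = json.dumps([
    {"role": "user", "content": [
        {"type": "image", "url": "https://example.com/cat.png"},
        {"type": "text", "text": "What is in this picture?"},
    ]},
    {"role": "assistant", "content": [
        {"type": "text", "text": "A cat sitting on a sofa."},
    ]},
    {"role": "user", "content": [
        {"type": "text", "text": "What color is the cat?"},
    ]},
])

# Gradio's multimodal ChatInterface delivers the input as a dict; the new
# handler reads message['text'] and parses it with json.loads.
message = {"text": payload, "files": []}

If json.loads fails on ordinary chat input, control falls through to the except branch, so plain use of the web UI keeps working.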
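The hunk header points at url_to_base64, whose body lies mostly outside this hunk; only its trailing else: return '' is visible. Below is a minimal sketch of what such a helper could look like, assuming it downloads the image and returns a data URI that ImageURLChunk accepts; the requests/base64 approach is an assumption, not the committed implementation.

import base64
import requests

def url_to_base64(image_url):
    # Assumed happy path: fetch the image and wrap it as a data URI.
    resp = requests.get(image_url, timeout=10)
    if resp.ok:
        mime = resp.headers.get('Content-Type', 'image/png')
        encoded = base64.b64encode(resp.content).decode('utf-8')
        return f"data:{mime};base64,{encoded}"
    else:
        # Matches the visible tail of the committed helper.
        return ''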