"""

import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import json
import torch
from PIL import Image
import os
import base64
from together import Together
import pathlib
import gradio_client as grc
import spaces

# Shared hidden JSON component; re-bound inside the TabbedInterface below.
shrd = gr.JSON(visible=False)

device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using {device}" if device != "cpu" else "Using CPU")

def _load_model():
    tokenizer = AutoTokenizer.from_pretrained("vikhyatk/moondream2", trust_remote_code=True, revision="2024-05-08")
    # torch_dtype belongs on the model, not the tokenizer.
    model = AutoModelForCausalLM.from_pretrained("vikhyatk/moondream2", device_map=device, trust_remote_code=True, revision="2024-05-08", torch_dtype=(torch.bfloat16 if device == "cuda" else torch.float32))
    return (model, tokenizer)

class MoonDream():
    def __init__(self, model=None, tokenizer=None):
        self.model, self.tokenizer = (model, tokenizer)
        if model is None or tokenizer is None:
            self.model, self.tokenizer = _load_model()
        self.device = device
        self.model.to(self.device)
    def __call__(self, question, imgs):
        # Stream one answer per image.
        for img in imgs:
            image_embeds = self.model.encode_image(img)
            res = self.model.answer_question(question=question, image_embeds=image_embeds, tokenizer=self.tokenizer)
            yield res
        return

md = MoonDream()
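
# Minimal usage sketch (the file name is hypothetical); MoonDream yields one
# streamed answer per image:
#   for answer in md("What is in this image?", [Image.open("example.jpg")]):
#       print(answer)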

SYSTEM_PROMPT = "You are Llama 3 70b. You have been given access to Moondream 2 for VQA when given images. When you have a question about an image, simply start your response with the text, '@question\\nMy question?'. When you do this, the request will be sent to Moondream 2. User can see this happening if they turn debug on, so be professional and stay on topic. Any chat from anyone starting with @answer is the answer to the last question asked. If something appears out of sync, ask User to clear the chat."
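
# The VQA handshake the system prompt describes, shown illustratively (the exact
# strings below are examples, not fixed protocol constants):
#   assistant: "@question\nWhat is the figure in the image holding?"
#   reply fed back into the chat: "@answer\nA small brass lantern."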

@spaces.GPU
def _respond_one(question, img):
  txt = ""
  # MoonDream.__call__ yields incremental answers; accumulate and re-yield them.
  for chunk in MoonDream()(question, [img]):
    yield (txt := txt + chunk)

def respond_batch(question, **imgs):
  md = MoonDream()
  for img in imgs.values():
    res = md(question, img)
    for r in res:
      yield r
    yield "\n\n\n\n\n\n"
  return
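
# Hypothetical batch call: each keyword argument maps to a list of PIL images,
# and batches are separated by a run of blank lines:
#   for chunk in respond_batch("Describe the scene.", first=[img_a], second=[img_b]):
#       print(chunk)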

def dual_images(img1: Image.Image):
  # Run once per image; streams an increasingly complete plain-English description of it.
  md = MoonDream()
  res = md("Describe the image in plain English.", [img1])
  txt = ""
  for r in res:
    yield (txt := txt + r)
  return

def merge_descriptions_to_prompt(mi, d1, d2):
  from together import Together
  tog = Together(api_key=os.getenv("TOGETHER_KEY"))
  res = tog.completions.create(prompt=f'''Describe what would result if the following two descriptions were describing one thing.
### Description 1:
```text
{d1}
```
### Description 2:
```text
{d2}
```
Merge-Specific Instructions:
```text
{mi}
```
Ensure you end your output with ```\\n
---
Complete Description:
```text''', model="meta-llama/Meta-Llama-3-70B", stop=["```"], max_tokens=1024)
  return res.choices[0].text.split("```")[0]

def xform_image_description(img, inst):
  from together import Together
  # dual_images streams cumulative text, so keep only the final, complete description.
  desc = ""
  for partial in dual_images(img):
    desc = partial
  tog = Together(api_key=os.getenv("TOGETHER_KEY"))
  prompt = f"Describe the image in aggressively verbose detail. I must know every freckle upon a man's brow and each blade of the grass intimately.\nDescription: ```text\n{desc}\n```\nInstructions:\n```text\n{inst}\n```\n\n\n---\nDetailed Description:\n```text"
  res = tog.completions.create(prompt=prompt, model="meta-llama/Meta-Llama-3-70B", stop=["```"], max_tokens=1024)
  # The completion contains only generated text, so take everything before the closing fence.
  return res.choices[0].text.split("```")[0]

def simple_desc(img, prompt):
  import base64
  import io
  gen = md(prompt, [img])
  total = ""
  for resp in gen:
    print(total := total + resp)
  # Encode a small PNG thumbnail in-memory instead of round-tripping through a temp file.
  buf = io.BytesIO()
  img.resize((192, 192)).save(buf, format="PNG")
  bts = buf.getvalue()
  res = {
    'image_b64': base64.b64encode(bts).decode('utf-8'),
    'description': total,
  }
  cl = grc.Client("http://127.0.0.1:7860/")
  result = cl.predict(
    message="Here's the description of your latest image, repeat any relevant details to keep them in context. Here's the description:\n```text\n" + total + "\n```\n\nAnd what the user wanted to begin with: `" + prompt + "`.",
    api_name="/chat"
  )
  print(result)
  return total, res, {**res, 'chat': result}

# simple_desc returns three values, so the interface needs three outputs.
ifc_imgprompt2text = gr.Interface(simple_desc, inputs=[gr.Image(label="input", type="pil"), gr.Textbox(label="prompt")], outputs=[gr.Textbox(label="description"), gr.JSON(label="json"), gr.JSON(label="chat")])

def chat(inpt, mess, desc=None):
  from together import Together
  print(inpt, mess)
  if mess is None:
    mess = []

  tog = Together(api_key=os.getenv("TOGETHER_KEY"))
  messages = [{
    'role': 'system',
    'content': SYSTEM_PROMPT
  }]
  if desc is not None and desc != "":
    messages.append({
      'role': 'system',
      'content': 'Here is a description of what you can see at the moment:\n```text\n' + desc + '\n```\nKeep this in mind when answering User\'s questions.'
    })
  messages.append({
    'role': 'user',
    'content': inpt
  })
  for cht in mess:
    print(cht)
  res = tog.chat.completions.create(
    messages=messages,
    model="meta-llama/Llama-3-70b-chat-hf", stop=["<|eot_id|>"], stream=True, safety_model="Meta-Llama/Llama-Guard-7b")
  txt = ""
  for pk in res:
    # Streamed deltas can carry empty content; guard against None before appending.
    txt += pk.choices[0].delta.content or ""
    yield txt

chatbot = gr.Chatbot(
  [
    ["Hello?", "### Greetings\n\nWell, it seems I have a visitor! What can I do for you? &lt3;\n\n---"]
  ],
  elem_id="chatbot",
  bubble_full_width=False,
  sanitize_html=False,
  show_copy_button=True,
  avatar_images=[
    pathlib.Path("image.jpeg"), 
    pathlib.Path("image2.jpeg")
])

wizard_chatbot = gr.Chatbot(
  [
    ["Hello?", "### Greetings\n\nWell, it seems I have a visitor! What can I do for you? &lt3;\n\n---"]
  ],
  elem_id="chatbot_wizard",
  bubble_full_width=True,
  sanitize_html=False,
  show_copy_button=True,
  avatar_images=[
    pathlib.Path("image.png"),
    pathlib.Path("image2.jpeg")
  ]
)

def wizard_chat(inpt, mess):
  from together import Together
  print(inpt, mess)
  if mess is None:
    mess = []

  tog = Together(api_key=os.getenv("TOGETHER_KEY"))
  messages = []
  messages.append({
    'role': 'user',
    'content': "English; Please reply in English. " + inpt
  })
  for cht in mess:
    print(cht)
  res = tog.chat.completions.create(
    messages=messages,
    model="microsoft/WizardLM-2-8x22B", stop=["</s>"], stream=True, safety_model="Meta-Llama/Llama-Guard-7b")
  txt = ""
  for pk in res:
    txt += pk.choices[0].delta.content or ""
    yield txt

botroom = None

def group_chat(room: str, wzn: str = "{}", lmn: str = "{}"):
  # Each participant model is configured by a JSON string. A "shadow config" of
  # sane sampling defaults could back any model in the room:
  shadow_config = {
    "top_k": 42,
    "top_p": 0.842,
    "max_tokens": 1536,
    "temperature": 0.693,
    "repetition_penalty": 1.12
  }
  wzn = json.loads(wzn)
  lmn = json.loads(lmn)
  print(wzn, lmn)
  if "replace_token" not in wzn:
    wzn["replace_token"] = "<|wizard|>"
  if "replace_token" not in lmn:
    lmn["replace_token"] = "</Llama>"
  # Whenever a model's token appears in the room transcript, feed it everything
  # before that token as its prompt, then splice the finished reply back into the
  # transcript so the loop makes progress.
  while room.find(lmn["replace_token"]) != -1 or room.find(wzn["replace_token"]) != -1:
    if room.find(wzn["replace_token"]) != -1:
      wzn["prompt"] = room[:room.find(wzn["replace_token"])]
      tx = ""
      for r in wizard_chat(wzn["prompt"], []):
        tx = r
        yield wzn["prompt"] + r
      room = room.replace(wzn["replace_token"], tx, 1)
    if room.find(lmn["replace_token"]) != -1:
      lmn["prompt"] = room[:room.find(lmn["replace_token"])]
      tx = ""
      for r in chat(lmn["prompt"], [], ""):
        tx = r
        yield lmn["prompt"] + r
      room = room.replace(lmn["replace_token"], tx, 1)
  return

arch_room = None

def wizard_complete(cdd, wzs):
  tog = Together(api_key=os.getenv("TOGETHER_KEY"))
  # Strip a stray "root=" prefix, if present, before parsing the JSON config.
  if wzs.startswith("root="):
    wzs = wzs[5:]
  wzs = json.loads(wzs)
  print(wzs)
  wzs.setdefault("stop", ['###', '\n\n\n', '<|im_end|>', '<|im_start|>'])
  wzs.setdefault("model", "WizardLM/WizardCoder-Python-34B-V1.0")
  wzs.setdefault("prompt", cdd)
  res = tog.completions.create(prompt=wzs["prompt"], model=wzs["model"], stop=wzs["stop"], max_tokens=1024, stream=False)
  txt = cdd + res.choices[0].text
  return txt, txt
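
# Example `wzs` payload for wizard_complete (the prompt value is illustrative;
# model and stop mirror the defaults applied above):
#   {"model": "WizardLM/WizardCoder-Python-34B-V1.0",
#    "stop": ["###", "\n\n\n", "<|im_end|>", "<|im_start|>"],
#    "prompt": "### Human\nWrite a one-line hello world.\n\n### Wizard\n"}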

with gr.Blocks() as arch_room:
  with gr.Row():
    gr.Markdown(f"""
## Arcanistry

"""
*POOF* -- You walk in, to a cloudy room filled with heavy smoke. In the center of the room rests a waist-height table. Upon the table, you see a... You don't understand... It's dark and light and cold and warm but... As you extend your hand, you hear the voice travel up your arm and into your ears...

---
""" """)
  with gr.Row():
    cdd = gr.Code('''### Human
I require a Python script that serves a simple file server in Python over MongoDB.

### Wizard
Sure! Here's the script:
```python''', language="markdown")
  with gr.Row():
    wzs = gr.Code(json.dumps({
      'token': '<|wizard|>',
      'model': 'WizardLM/WizardCoder-Python-34B-V1.0',
      'stop': ['###', '\n\n\n', '<|im_end|>', '<|im_start|>']
    }))
  with gr.Row():
    rnd = gr.Markdown("")
  with gr.Row():
    subm_prompt = gr.Button("Run Prompt")
    subm_prompt.click(wizard_complete, inputs=[cdd, wzs], outputs=[cdd, rnd])

with gr.TabbedInterface([ifc_imgprompt2text, c_ifc := gr.ChatInterface(chat, chatbot=chatbot, submit_btn=gr.Button(scale=1)), gr.ChatInterface(wizard_chat), arch_room], ["Prompt & Image 2 Text", "Chat w/ Llama 3 70b", "Chat w/ WizardLM 8x22B", "Arcanistry"]) as ifc:
  shrd = gr.JSON(visible=False)
  ifc.launch(share=False, debug=True, show_error=True)
"""

from transformers import AutoModelForCausalLM
import gradio as gr
import spaces
from PIL import Image
import hashlib
import base64

def load_md2():
  # Moondream 2 (2025-01-09 revision) loads on CPU; it is moved to GPU per-request below.
  model = AutoModelForCausalLM.from_pretrained("vikhyatk/moondream2", device_map="cpu", trust_remote_code=True, revision="2025-01-09")
  return model

md2 = load_md2()

@spaces.GPU()
def moondream2(question, image, history=None):
  global md2
  model = md2
  model.cuda()
  # Fingerprint the image: hash and base64-encode the raw pixel bytes of a 224x224 thumbnail.
  hsh = hashlib.sha256(bts := image.resize((224, 224), Image.NEAREST).tobytes()).hexdigest()
  b64 = base64.b64encode(bts).decode('utf-8')
  # An empty or missing question means "caption the image"; otherwise run VQA.
  res = model.query(image, question) if question else model.caption(image)
  model.cpu()
  ress = list(history) if history is not None else []
  ress.append({
    "answer": res["answer"] if question else None,
    "caption": res["caption"] if not question else None,
    "sha256": hsh,
    "image_b64": b64
  })
  return ress, ress
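
# Hypothetical direct call outside the UI (the image path is illustrative):
#   from PIL import Image
#   results, _ = moondream2("What color is the car?", Image.open("car.jpg"))
#   print(results[-1]["answer"])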

def gui():
  with gr.Blocks() as blk:
    with gr.Row():
      imgs = gr.Image(label="input", type="pil", elem_id="imgs")
    with gr.Row():
      txt = gr.Textbox(label="prompt")
    with gr.Row():
      btn = gr.Button("Run")
    with gr.Row():
      res = gr.JSON(label="output")
    with gr.Row(visible=False):
      history = gr.JSON(label="history")
    btn.click(moondream2, inputs=[txt, imgs, history], outputs=[res, history])
  blk.launch(share=False)

if __name__ == "__main__":
  gui()