ovi054 committed
Commit fa8e3c4
1 Parent(s): 58952e6

Update app.py

Files changed (1)
  1. app.py +25 -23
app.py CHANGED
@@ -4,9 +4,9 @@ import torch
 from PIL import Image
 from transformers import AutoProcessor, AutoModelForCausalLM

-import os
-import random
-from gradio_client import Client
+# import os
+# import random
+# from gradio_client import Client


 subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
@@ -16,7 +16,7 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 florence_model = AutoModelForCausalLM.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True).to(device).eval()
 florence_processor = AutoProcessor.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True)

-api_key = os.getenv("HF_READ_TOKEN")
+# api_key = os.getenv("HF_READ_TOKEN")

 def generate_caption(image):
     if not isinstance(image, Image.Image):
@@ -39,28 +39,30 @@ def generate_caption(image):
     )
     prompt = parsed_answer["<MORE_DETAILED_CAPTION>"]
     print("Generation completed!:"+ prompt)
-    yield prompt, None
-    image_path = generate_image(prompt,random.randint(0, 4294967296))
-    yield prompt, image_path
+    return prompt
+    # yield prompt, None
+    # image_path = generate_image(prompt,random.randint(0, 4294967296))
+    # yield prompt, image_path

-def generate_image(prompt, seed=42, width=1024, height=1024):
-    try:
-        result = Client("KingNish/Realtime-FLUX", hf_token=api_key).predict(
-            prompt=prompt,
-            seed=seed,
-            width=width,
-            height=height,
-            api_name="/generate_image"
-        )
-        # Extract the image path from the result tuple
-        image_path = result[0]
-        return image_path
-    except Exception as e:
-        raise Exception(f"Error generating image: {str(e)}")
+# def generate_image(prompt, seed=42, width=1024, height=1024):
+#     try:
+#         result = Client("KingNish/Realtime-FLUX", hf_token=api_key).predict(
+#             prompt=prompt,
+#             seed=seed,
+#             width=width,
+#             height=height,
+#             api_name="/generate_image"
+#         )
+#         # Extract the image path from the result tuple
+#         image_path = result[0]
+#         return image_path
+#     except Exception as e:
+#         raise Exception(f"Error generating image: {str(e)}")

 io = gr.Interface(generate_caption,
                   inputs=[gr.Image(label="Input Image")],
-                  outputs = [gr.Textbox(label="Output Prompt", lines=3, show_copy_button = True),
-                              gr.Image(label="Output Image")]
+                  outputs = [gr.Textbox(label="Output Prompt", lines=2, show_copy_button = True),
+                              # gr.Image(label="Output Image")
+                            ]
 )
 io.launch(debug=True)
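
After this commit, generate_caption returns only the caption string and the interface exposes a single Textbox output; the Realtime-FLUX image step and its gradio_client import are commented out. A minimal sketch of calling the updated Space from Python with gradio_client, assuming it is deployed as a public Gradio Space; the Space ID below is a placeholder, and "/predict" is the default endpoint name gr.Interface registers:

    from gradio_client import Client, handle_file

    # Placeholder Space ID; substitute the Space that actually hosts this app.py
    client = Client("<user>/<space-name>")

    caption = client.predict(
        handle_file("example.jpg"),  # local path or URL to the input image
        api_name="/predict",         # gr.Interface default endpoint (assumption)
    )
    print(caption)  # the <MORE_DETAILED_CAPTION> prompt string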