merve HF staff committed on
Commit
db8a6e8
1 Parent(s): 3d139ce

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -17,7 +17,6 @@ quantization_config = BitsAndBytesConfig(
17
  pipe = pipeline("image-to-text", model=model_id, model_kwargs={"quantization_config": quantization_config})
18
 
19
 
20
- DESCRIPTION = "LLaVA is now available in transformers!"
21
 
22
  def extract_response_pairs(text):
23
  pattern = re.compile(r'(USER:.*?)ASSISTANT:(.*?)(?:$|USER:)', re.DOTALL)
@@ -61,10 +60,12 @@ css = """
61
  """
62
  with gr.Blocks(css="style.css") as demo:
63
  gr.Markdown(DESCRIPTION)
 
64
  chatbot = gr.Chatbot(label="Chat", show_label=False)
 
65
  with gr.Row():
66
  image = gr.Image(type="pil")
67
- text_input = gr.Text(label="Chat Input", show_label=False, max_lines=1, container=False)
68
 
69
  history_chat = gr.State(value=[])
70
  with gr.Row():
@@ -76,7 +77,7 @@ with gr.Blocks(css="style.css") as demo:
76
  minimum=1,
77
  maximum=200,
78
  step=1,
79
- value=100,
80
  )
81
 
82
  chat_output = [
 
17
  pipe = pipeline("image-to-text", model=model_id, model_kwargs={"quantization_config": quantization_config})
18
 
19
 
 
20
 
21
  def extract_response_pairs(text):
22
  pattern = re.compile(r'(USER:.*?)ASSISTANT:(.*?)(?:$|USER:)', re.DOTALL)
 
60
  """
61
  with gr.Blocks(css="style.css") as demo:
62
  gr.Markdown(DESCRIPTION)
63
+ gr.Markdown("LLaVA is now available in transformers with 4-bit quantization ⚡️")
64
  chatbot = gr.Chatbot(label="Chat", show_label=False)
65
+ gr.Markdown("Input image and text to start chatting 👇 ")
66
  with gr.Row():
67
  image = gr.Image(type="pil")
68
+ text_input = gr.Text(label="Chat Input", max_lines=1)
69
 
70
  history_chat = gr.State(value=[])
71
  with gr.Row():
 
77
  minimum=1,
78
  maximum=200,
79
  step=1,
80
+ value=150,
81
  )
82
 
83
  chat_output = [