jiuhai committed
Commit d2a5bbc · 1 Parent(s): 1089a55
app.py CHANGED
@@ -51,7 +51,7 @@ model_name = get_model_name_from_path(args.model_path)
 model_kwargs = {
     "trust_remote_code": True,
     "torch_dtype": torch.bfloat16,
-    "attn_implementation": "flash_attention_2"
+    "attn_implementation": "eager"
 }
 tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, device_map="cuda:0", **model_kwargs)
 our_chatbot = None
@@ -155,11 +155,13 @@ def generate(state, imagebox, textbox, image_process_mode, temperature, top_p, m
         streamer=streamer,
         use_cache=True,
         pad_token_id=tokenizer.eos_token_id,
+        eos_token_id=[32007],
         **image_args
     ))
     thread.start()
     generated_text = ''
     for new_text in streamer:
+        new_text = new_text.replace('<|end|>', "")
         generated_text += new_text
         if generated_text.endswith(stop_str):
             generated_text = generated_text[:-len(stop_str)]
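
For context, the two hunks work together: the model is loaded with the portable "eager" attention backend (unlike "flash_attention_2", it needs no flash-attn install or compatible GPU kernel), and generate() is told to also stop on token id 32007, which is the Phi-3 "<|end|>" token, while the streaming loop scrubs any literal "<|end|>" text before it reaches the UI. Below is a minimal, self-contained sketch of the same pattern; as an assumption it runs against plain microsoft/Phi-3-mini-4k-instruct instead of the Space's LLaVA checkpoint loaded via load_pretrained_model, and it omits the app's stop_str trimming.

# Sketch only: plain Phi-3 stands in for the LLaVA-Phi-3 model served by app.py.
from threading import Thread

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = "microsoft/Phi-3-mini-4k-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    attn_implementation="eager",  # portable backend; "flash_attention_2" requires the flash-attn package
    device_map="cuda:0",
    trust_remote_code=True,
)

# 32007 is Phi-3's "<|end|>" token; deriving it from the tokenizer avoids the magic number.
end_token_id = tokenizer.convert_tokens_to_ids("<|end|>")  # == 32007

prompt = "<|user|>\nDescribe the attention mechanism in one sentence.<|end|>\n<|assistant|>\n"
input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)

# Same threaded-streaming pattern as app.py: generate() runs in a worker thread
# and the streamer yields decoded text chunks as they are produced.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, timeout=60)
thread = Thread(target=model.generate, kwargs=dict(
    inputs=input_ids,
    max_new_tokens=128,
    streamer=streamer,
    use_cache=True,
    pad_token_id=tokenizer.eos_token_id,
    eos_token_id=[end_token_id],  # stop at <|end|>, as the diff does with [32007]
))
thread.start()

generated_text = ""
for new_text in streamer:
    # Mirror the diff: drop any literal "<|end|>" text that slips through the streamer.
    new_text = new_text.replace("<|end|>", "")
    generated_text += new_text
print(generated_text)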
llava/__pycache__/__init__.cpython-310.pyc ADDED (binary file, 218 Bytes)
llava/__pycache__/constants.cpython-310.pyc ADDED (binary file, 484 Bytes)
llava/__pycache__/conversation.cpython-310.pyc ADDED (binary file, 11.1 kB)
llava/__pycache__/mm_utils.cpython-310.pyc ADDED (binary file, 8.97 kB)
llava/__pycache__/utils.cpython-310.pyc ADDED (binary file, 4.02 kB)
llava/model/__pycache__/__init__.cpython-310.pyc ADDED (binary file, 539 Bytes)
llava/model/__pycache__/builder.cpython-310.pyc ADDED (binary file, 5.35 kB)
llava/model/__pycache__/llava_arch.cpython-310.pyc ADDED (binary file, 11.2 kB)
llava/model/language_model/__pycache__/llava_llama.cpython-310.pyc ADDED (binary file, 3.81 kB)
llava/model/language_model/__pycache__/llava_mistral.cpython-310.pyc ADDED (binary file, 3.83 kB)
llava/model/language_model/__pycache__/llava_mpt.cpython-310.pyc ADDED (binary file, 3.16 kB)
llava/model/language_model/__pycache__/llava_phi3.cpython-310.pyc ADDED (binary file, 3.77 kB)
llava/model/multimodal_encoder/__pycache__/builder.cpython-310.pyc ADDED (binary file, 648 Bytes)
llava/model/multimodal_encoder/__pycache__/clip_encoder.cpython-310.pyc ADDED (binary file, 5.69 kB)
llava/model/multimodal_projector/__pycache__/builder.cpython-310.pyc ADDED (binary file, 2.69 kB)
llava/train/__pycache__/llava_trainer.cpython-310.pyc ADDED (binary file, 11.3 kB)
llava/train/__pycache__/train.cpython-310.pyc ADDED (binary file, 33.3 kB)