intfloat committed on
Commit 078d6bc · 1 Parent(s): 97fe6cd

Set use_cache to false

Files changed (2):
  1. README.md +2 -10
  2. config.json +1 -1
README.md CHANGED
@@ -44,7 +44,7 @@ pip install -r requirements.txt
 
 Then you can enter the directory to run the following command.
 ```python
-from transformers import MllamaForConditionalGeneration, AutoProcessor, AutoConfig
+from transformers import MllamaForConditionalGeneration, AutoProcessor
 import torch
 from PIL import Image
 
@@ -64,17 +64,9 @@ model_name = "intfloat/mmE5-mllama-11b-instruct"
 
 # Load Processor and Model
 processor = AutoProcessor.from_pretrained(model_name)
-processor.tokenizer.padding_side = "right"
-
-config = AutoConfig.from_pretrained(model_name)
-if hasattr(config, 'use_cache'):
-    config.use_cache = False
-config.padding_side = "right"
 model = MllamaForConditionalGeneration.from_pretrained(
-    model_name, config=config,
-    torch_dtype=torch.bfloat16
+    model_name, torch_dtype=torch.bfloat16
 ).to("cuda")
-model.padding_side = "right"
 model.eval()
 
 # Image + Text -> Text
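For reference, the README's loading snippet after this commit reduces to the following. This is assembled only from the diff's unchanged context lines plus the one added line; the rest of the README's embedding example is untouched by the commit and omitted here:

```python
from transformers import MllamaForConditionalGeneration, AutoProcessor
import torch

model_name = "intfloat/mmE5-mllama-11b-instruct"

# Load Processor and Model. The padding_side and use_cache overrides are
# no longer needed in user code, since use_cache is now false in config.json.
processor = AutoProcessor.from_pretrained(model_name)
model = MllamaForConditionalGeneration.from_pretrained(
    model_name, torch_dtype=torch.bfloat16
).to("cuda")
model.eval()
```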
config.json CHANGED
@@ -96,7 +96,7 @@
 "torchscript": false,
 "typical_p": 1.0,
 "use_bfloat16": false,
-"use_cache": true,
+"use_cache": false,
 "vocab_size": 128256
 },
 "torch_dtype": "bfloat16",