Severian committed
Commit 4bed28f (verified) · 1 parent: 09cb30b

Update README.md

Files changed (1): README.md +4 -4
README.md CHANGED
@@ -42,19 +42,19 @@ Since this is a base model the IKM dataset greatly affects the output. The IKM d
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
 
-# Load model in 8-bit precision
+# Load model in 4-bit precision
 quantization_config = BitsAndBytesConfig(
-    load_in_8bit=True,
+    load_in_4bit=True,
     llm_int8_skip_modules=["mamba"]
 )
 model = AutoModelForCausalLM.from_pretrained(
-    "ai21labs/Jamba-v0.1",
+    "Severian/Jamba-Nexus-IKM-v1",
     trust_remote_code=True,
     torch_dtype=torch.bfloat16,
     attn_implementation="flash_attention_2",
     quantization_config=quantization_config
 )
-tokenizer = AutoTokenizer.from_pretrained("ai21labs/Jamba-v0.1")
+tokenizer = AutoTokenizer.from_pretrained("Severian/Jamba-Nexus-IKM-v1")
 
 # Tokenize input
 prompt = """How could we use cheese to reignite the sun? Answer:"""