aigeek0x0 committed on
Commit
3dfa451
·
verified ·
1 Parent(s): d81ad24

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -48,7 +48,7 @@ For more powerful GPU usage and faster inference, you can deploy it on a Runpod
48
  import torch
49
  from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TextStreamer
50
 
51
- model_name = "aigeek0x0/radiantloom-mixtral-8x7b-fusion"
52
 
53
  bnb_config = BitsAndBytesConfig(
54
  load_in_4bit=True,
 
48
  import torch
49
  from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TextStreamer
50
 
51
+ model_name = "Radiantloom/radiantloom-mixtral-8x7b-fusion"
52
 
53
  bnb_config = BitsAndBytesConfig(
54
  load_in_4bit=True,