Update README.md
README.md CHANGED

@@ -48,7 +48,7 @@ For more powerful GPU usage and faster inference, you can deploy it on a Runpod
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TextStreamer

-model_name = "
+model_name = "Radiantloom/radiantloom-mixtral-8x7b-fusion"

 bnb_config = BitsAndBytesConfig(
     load_in_4bit=True,
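For context, a minimal runnable sketch of the quantized-loading snippet this hunk excerpts is shown below. The BitsAndBytesConfig options beyond load_in_4bit, the prompt, and the generation settings are assumptions, not taken from the README.

```python
# Sketch of 4-bit loading with streamed generation. Options marked "assumed"
# are not in the diff hunk, which truncates after load_in_4bit=True.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TextStreamer

model_name = "Radiantloom/radiantloom-mixtral-8x7b-fusion"

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",              # assumed
    bnb_4bit_compute_dtype=torch.bfloat16,  # assumed
)

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",
)

# Stream generated tokens to stdout as they are produced.
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
prompt = "Explain 4-bit quantization in one paragraph."  # placeholder prompt
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
model.generate(**inputs, streamer=streamer, max_new_tokens=256)
```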