marcellopoliti
committed on
Update README.md
Browse files
README.md
CHANGED
@@ -85,14 +85,14 @@ tokenizer = AutoTokenizer.from_pretrained("brianknowsai/Brian-Llama-3.2-3B")
|
|
85 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
86 |
model.to(device)
|
87 |
|
88 |
-
input_text = "A
|
89 |
|
90 |
# Tokenize the input text
|
91 |
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
|
92 |
|
93 |
# Generate output (this is typical for causal language models)
|
94 |
with torch.no_grad():
|
95 |
-
outputs = model.generate(input_ids, max_length=
|
96 |
|
97 |
# Decode the generated tokens to text
|
98 |
generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
|
|
85 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
86 |
model.to(device)
|
87 |
|
88 |
+
input_text = "A web3 bridge is "
|
89 |
|
90 |
# Tokenize the input text
|
91 |
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
|
92 |
|
93 |
# Generate output (this is typical for causal language models)
|
94 |
with torch.no_grad():
|
95 |
+
outputs = model.generate(input_ids, max_length=80, num_return_sequences=1)
|
96 |
|
97 |
# Decode the generated tokens to text
|
98 |
generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
|