marcellopoliti committed
Commit 08eeb67 · verified · 1 Parent(s): d2bb6c6

Update README.md

Files changed (1): README.md (+2 -2)
README.md CHANGED
@@ -85,14 +85,14 @@ tokenizer = AutoTokenizer.from_pretrained("brianknowsai/Brian-Llama-3.2-3B")
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model.to(device)
 
-input_text = "A frame is "
+input_text = "A web3 bridge is "
 
 # Tokenize the input text
 input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
 
 # Generate output (this is typical for causal language models)
 with torch.no_grad():
-    outputs = model.generate(input_ids, max_length=50, num_return_sequences=1)
+    outputs = model.generate(input_ids, max_length=80, num_return_sequences=1)
 
 # Decode the generated tokens to text
 generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
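
For context, below is a minimal runnable sketch of the README example as it reads after this change. The model-loading line is an assumption: only the tokenizer call appears in the hunk header, so loading via AutoModelForCausalLM from the same repo id is inferred, and the final print is added for illustration.

```python
# Sketch of the full snippet after this commit; the AutoModelForCausalLM line
# is assumed (not shown in the diff), everything else follows the hunk above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("brianknowsai/Brian-Llama-3.2-3B")  # assumed loading step
tokenizer = AutoTokenizer.from_pretrained("brianknowsai/Brian-Llama-3.2-3B")

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

input_text = "A web3 bridge is "

# Tokenize the input text
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)

# Generate output (typical usage for causal language models)
with torch.no_grad():
    outputs = model.generate(input_ids, max_length=80, num_return_sequences=1)

# Decode the generated tokens to text
generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(generated_text)
```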