OmPrakashSingh1704 committed on
Commit
aef3ddc
1 Parent(s): 459ae37

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -3
app.py CHANGED
@@ -3,14 +3,13 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import torch, re, json
4
  from datetime import datetime
5
 
6
- device = "cuda" # the device to load the model onto
7
 
8
  # Load model and tokenizer
9
  model = AutoModelForCausalLM.from_pretrained(
10
  "Qwen/Qwen1.5-0.5B-Chat",
11
  torch_dtype="auto",
12
  device_map="auto",
13
- ).to(device)
14
  tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B-Chat")
15
 
16
  if 'recipe' not in st.session_state:
@@ -120,7 +119,7 @@ def generate_recipe(user_inputs):
120
  )
121
 
122
  # Tokenize and move to the correct device
123
- model_inputs = tokenizer([text], return_tensors="pt").to(device)
124
  torch.cuda.empty_cache()
125
  with torch.no_grad():
126
  generated_ids = model.generate(
 
3
  import torch, re, json
4
  from datetime import datetime
5
 
 
6
 
7
  # Load model and tokenizer
8
  model = AutoModelForCausalLM.from_pretrained(
9
  "Qwen/Qwen1.5-0.5B-Chat",
10
  torch_dtype="auto",
11
  device_map="auto",
12
+ )
13
  tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B-Chat")
14
 
15
  if 'recipe' not in st.session_state:
 
119
  )
120
 
121
  # Tokenize and move to the correct device
122
+ model_inputs = tokenizer([text], return_tensors="pt")
123
  torch.cuda.empty_cache()
124
  with torch.no_grad():
125
  generated_ids = model.generate(