PhantHive committed on
Commit
20f6853
1 Parent(s): 9641b31

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -3
app.py CHANGED
@@ -12,11 +12,10 @@ tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
12
  model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
13
 
14
  # Load the Lora model
15
- model = PeftModel.from_pretrained(model, peft_model_id)
16
  model.to(device)
17
 
18
- def greet(text
19
- ):
20
  with torch.no_grad(): # Disable gradient calculation for inference
21
  batch = tokenizer(text, return_tensors='pt').to(device) # Move tensors to device
22
  with torch.cuda.amp.autocast(): # Enable mixed-precision if available
 
12
  model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
13
 
14
  # Load the Lora model
15
+ model = PeftModel.from_pretrained(model, model_id)
16
  model.to(device)
17
 
18
+ def greet(text):
 
19
  with torch.no_grad(): # Disable gradient calculation for inference
20
  batch = tokenizer(text, return_tensors='pt').to(device) # Move tensors to device
21
  with torch.cuda.amp.autocast(): # Enable mixed-precision if available