traversaal-ai committed
Commit 2c99f68
1 Parent(s): a6b011a

Update run.py

Files changed (1): run.py (+2, -3)
run.py CHANGED
@@ -15,8 +15,7 @@ from unsloth import FastLanguageModel
 import torch
 
 
-max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
-dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
+max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally! # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
 load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
 
 
@@ -32,7 +31,7 @@ from unsloth import FastLanguageModel
 model, tokenizer = FastLanguageModel.from_pretrained(
     model_name = "traversaal-llm-regional-languages/Unsloth_Urdu_Llama3_1_4bit_PF100",
     max_seq_length = max_seq_length,
-    dtype = dtype,
+    dtype = 'Auto',
     load_in_4bit = load_in_4bit
 )
 FastLanguageModel.for_inference(model)
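
For reference, the patched run.py reduces to the self-contained snippet below. One caveat: this commit passes the string 'Auto' for dtype, whereas the comment removed by this very diff (and Unsloth's examples) use dtype = None for auto-detection, or an explicit torch dtype (float16 for Tesla T4/V100, bfloat16 for Ampere+). The sketch therefore assumes dtype = None; the Urdu prompt and generation settings at the end are purely illustrative, not part of the commit.

# Minimal sketch of the patched run.py. Assumes dtype = None (Unsloth's
# auto-detection convention, per the comment removed in this commit)
# rather than the string 'Auto' that the commit introduces.
from unsloth import FastLanguageModel
import torch

max_seq_length = 2048   # Any length works; RoPE scaling is handled internally
load_in_4bit = True     # 4-bit quantization to reduce memory usage

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "traversaal-llm-regional-languages/Unsloth_Urdu_Llama3_1_4bit_PF100",
    max_seq_length = max_seq_length,
    dtype = None,        # None = auto-detect; float16 for T4/V100, bfloat16 for Ampere+
    load_in_4bit = load_in_4bit,
)
FastLanguageModel.for_inference(model)  # enable Unsloth's fast inference path

# Hypothetical usage: generate a short completion for an Urdu prompt.
inputs = tokenizer(["پاکستان کا دارالحکومت"], return_tensors = "pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens = 64)
print(tokenizer.batch_decode(outputs, skip_special_tokens = True)[0])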