deltawi committed on
Commit
ed149fa
1 Parent(s): 1c52f8c

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +24 -9
README.md CHANGED
@@ -1,6 +1,11 @@
1
  ---
2
  library_name: peft
3
  base_model: Qwen/Qwen1.5-1.8B-Chat
 
 
 
 
 
4
  ---
5
 
6
  # Qwen-1.5-1.8B-SQL Model
@@ -11,7 +16,7 @@ This model, `deltawi/Qwen-1.5-1.8B-SQL`, is fine-tuned on SQL generation based o
11
  ## Installation
12
  To use this model, you need to install the `transformers` library from Hugging Face. You can do this using pip:
13
  ```bash
14
- pip install transformers
15
  ```
16
 
17
  ## Usage
@@ -20,16 +25,26 @@ pip install transformers
20
  from transformers import AutoModelForCausalLM, AutoTokenizer
21
 
22
  # Set the device
23
- device = "mps" # replace with your device: "cpu", "cuda", "mps"
24
 
25
- # Load the model
26
- model = AutoModelForCausalLM.from_pretrained(
27
- "deltawi/Qwen-1.5-1.8B-SQL",
28
- device_map="auto"
29
- )
 
 
 
30
 
31
- # Load the tokenizer
32
- tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B-Chat")
 
 
 
 
 
 
 
33
 
34
  # Define your question and context
35
  Question = "Your question here"
 
1
  ---
2
  library_name: peft
3
  base_model: Qwen/Qwen1.5-1.8B-Chat
4
+ datasets:
5
+ - b-mc2/sql-create-context
6
+ language:
7
+ - en
8
+ pipeline_tag: text-generation
9
  ---
10
 
11
  # Qwen-1.5-1.8B-SQL Model
 
16
  ## Installation
17
  To use this model, you need to install the `transformers` library from Hugging Face. You can do this using pip:
18
  ```bash
19
+ pip install transformers huggingface_hub accelerate peft
20
  ```
21
 
22
  ## Usage
 
25
  from transformers import AutoModelForCausalLM, AutoTokenizer
26
 
27
  # Set the device
28
+ device = "cuda" # replace with your device: "cpu", "cuda", "mps"
29
 
30
+ from transformers import AutoModelForCausalLM, AutoTokenizer
31
+ import random
32
+
33
+
34
+ peft_model_id = "deltawi/Qwen-1.5-1.8B-SQL"
35
+ base_model_id = "Qwen/Qwen1.5-1.8B-Chat"
36
+
37
+ device = "cuda"
38
 
39
+ model = AutoModelForCausalLM.from_pretrained(base_model_id, device_map="auto")
40
+ model.load_adapter(peft_model_id)
41
+ tokenizer = AutoTokenizer.from_pretrained(
42
+ "deltawi/Qwen-1.5-1.8B-SQL",
43
+ #model_max_length=2048,
44
+ padding_side="right",
45
+ trust_remote_code=True,
46
+ pad_token='<|endoftext|>'
47
+ )
48
 
49
  # Define your question and context
50
  Question = "Your question here"