Upload łowca.ipynb
łowca.ipynb +673 -0
łowca.ipynb ADDED
@@ -0,0 +1,673 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2023-04-23T12:41:35.677838Z",
+     "start_time": "2023-04-23T12:41:31.958031Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "from transformers import AutoTokenizer, AutoModelForCausalLM\n",
+    "\n",
+    "tokenizer = AutoTokenizer.from_pretrained(\"EleutherAI/gpt-j-6B\")\n",
+    "\n",
+    "tokenizer.pad_token = tokenizer.eos_token\n",
+    "tokenizer.pad_token_id = tokenizer.eos_token_id"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# GPT-J\n",
+    "\n",
+    "First, let's check how vanilla GPT-J performs"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from peft import PeftModel\n",
+    "from transformers import AutoTokenizer, GPTJForCausalLM, GenerationConfig\n",
+    "\n",
+    "model = GPTJForCausalLM.from_pretrained(\n",
+    "    \"EleutherAI/gpt-j-6B\",\n",
+    "    load_in_8bit=True,\n",
+    "    device_map=\"auto\",\n",
+    ")\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "GPTJForCausalLM(\n",
+       "  (transformer): GPTJModel(\n",
+       "    (wte): Embedding(50400, 4096)\n",
+       "    (drop): Dropout(p=0.0, inplace=False)\n",
+       "    (h): ModuleList(\n",
+       "      (0-27): 28 x GPTJBlock(\n",
+       "        (ln_1): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)\n",
+       "        (attn): GPTJAttention(\n",
+       "          (attn_dropout): Dropout(p=0.0, inplace=False)\n",
+       "          (resid_dropout): Dropout(p=0.0, inplace=False)\n",
+       "          (k_proj): Linear(in_features=4096, out_features=4096, bias=False)\n",
+       "          (v_proj): Linear(in_features=4096, out_features=4096, bias=False)\n",
+       "          (q_proj): Linear(in_features=4096, out_features=4096, bias=False)\n",
+       "          (out_proj): Linear(in_features=4096, out_features=4096, bias=False)\n",
+       "        )\n",
+       "        (mlp): GPTJMLP(\n",
+       "          (fc_in): Linear(in_features=4096, out_features=16384, bias=True)\n",
+       "          (fc_out): Linear(in_features=16384, out_features=4096, bias=True)\n",
+       "          (act): NewGELUActivation()\n",
+       "          (dropout): Dropout(p=0.0, inplace=False)\n",
+       "        )\n",
+       "      )\n",
+       "    )\n",
+       "    (ln_f): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)\n",
+       "  )\n",
+       "  (lm_head): Linear(in_features=4096, out_features=50400, bias=True)\n",
+       ")"
+      ]
+     },
+     "execution_count": 3,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def owca_generate(text):\n",
+    "    inputs = tokenizer(\n",
+    "        text,\n",
+    "        return_tensors=\"pt\",\n",
+    "    )\n",
+    "    input_ids = inputs[\"input_ids\"].cuda()\n",
+    "\n",
+    "    generation_config = GenerationConfig(\n",
+    "        temperature=0.6,\n",
+    "        top_p=0.95,\n",
+    "        repetition_penalty=1.2,\n",
+    "    )\n",
+    "\n",
+    "    print(\"Generating...\")\n",
+    "    generation_output = model.generate(\n",
+    "        input_ids=input_ids,\n",
+    "        generation_config=generation_config,\n",
+    "        return_dict_in_generate=True,\n",
+    "        output_scores=True,\n",
+    "        max_new_tokens=128,\n",
+    "        pad_token_id=0,\n",
+    "        eos_token_id=50256\n",
+    "    )\n",
+    "\n",
+    "    for s in generation_output.sequences:\n",
+    "        print(tokenizer.decode(s))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Generating...\n",
+      "poniżej znajduje się instrukcja, która opisuje zadanie. Napisz odpowiedź, która odpowiednio uzupełnia żądanie.\n",
+      "### Instrukcja:\n",
+      "Jaka jest różnica pomiędzy alpaką i owcą?\n",
+      "### Odpowiedź:\n",
+      "Ponieważ alpa to nazwisko gatunku ptaków a owca to rodzina ptaszek.\n",
+      "\n",
+      "<|endoftext|>\n",
+      "CPU times: user 2.39 s, sys: 423 ms, total: 2.81 s\n",
+      "Wall time: 2.78 s\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%time\n",
+    "PROMPT = \"\"\"poniżej znajduje się instrukcja, która opisuje zadanie. Napisz odpowiedź, która odpowiednio uzupełnia żądanie.\n",
+    "### Instrukcja:\n",
+    "Jaka jest różnica pomiędzy alpaką i owcą?\n",
+    "### Odpowiedź:\"\"\"\n",
+    "owca_generate(PROMPT)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Generating...\n",
+      "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n",
+      "### Instruction:\n",
+      "What are the differences between alpacas and sheep?\n",
+      "### Response:\n",
+      "Alpaca's have longer legs than sheep, they also have more hair on their bodies. Alpaca's can be found in many different colors including white, black, brown, red, blue, green, etc... Sheep can only be found in one color which is usually gray or tan. They do not have as much hair on them as alpaca's but it does grow very fast. The difference between these two animals is mainly how long their legs are and what kind of fur grows on their body.\n",
+      "## Lesson 3 - Grammar Rules for Writing Sentences with Verbs\n",
+      "In this lesson you will learn\n",
+      "CPU times: user 5.76 s, sys: 31.7 ms, total: 5.79 s\n",
+      "Wall time: 5.78 s\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%time\n",
+    "PROMPT = \"\"\"Below is an instruction that describes a task. Write a response that appropriately completes the request.\n",
+    "### Instruction:\n",
+    "What are the differences between alpacas and sheep?\n",
+    "### Response:\"\"\"\n",
+    "owca_generate(PROMPT)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Generating...\n",
+      "poniżej znajduje się instrukcja, która opisuje zadanie. Napisz odpowiedź, która odpowiednio uzupełnia żądanie.\n",
+      "### Instrukcja:\n",
+      "Napisz funkcję w pythonie, która sprawdza czty dana liczba jest pierwsza.\n",
+      "### Odpowiedź:\n",
+      "```python\n",
+      "def is_first(x):\n",
+      "    return x == 1\n",
+      "```\n",
+      "\n",
+      "<|endoftext|>\n",
+      "CPU times: user 1.08 s, sys: 4.95 ms, total: 1.09 s\n",
+      "Wall time: 1.08 s\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%time\n",
+    "PROMPT = \"\"\"poniżej znajduje się instrukcja, która opisuje zadanie. Napisz odpowiedź, która odpowiednio uzupełnia żądanie.\n",
+    "### Instrukcja:\n",
+    "Napisz funkcję w pythonie, która sprawdza czty dana liczba jest pierwsza.\n",
+    "### Odpowiedź:\"\"\"\n",
+    "\n",
+    "owca_generate(PROMPT)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Generating...\n",
+      "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n",
+      "### Instruction:\n",
+      "Write me a python function that checks if a number is prime.\n",
+      "### Response:\n",
+      "def check_prime(n):\n",
+      "    \"\"\"Check whether n is prime or not.\"\"\"\n",
+      "\n",
+      "    # Check for divisibility by 2, 3 and 5.\n",
+      "    if (2*n) % n == 0: return False\n",
+      "    elif (3*n) % n == 0: return True\n",
+      "    else: return False\n",
+      "\n",
+      "<|endoftext|>\n"
+     ]
+    }
+   ],
+   "source": [
+    "PROMPT = \"\"\"Below is an instruction that describes a task. Write a response that appropriately completes the request.\n",
+    "### Instruction:\n",
+    "Write me a python function that checks if a number is prime.\n",
+    "### Response:\"\"\"\n",
+    "owca_generate(PROMPT)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Generating...\n",
+      "poniżej znajduje się instrukcja, która opisuje zadanie. Napisz odpowiedź, która odpowiednio uzupełnia żądanie.\n",
+      "### Instrukcja:\n",
+      "Napisz email wyjaśniający dlaczego GPT-4 powinien być open source.\n",
+      "### Odpowiedź:\n",
+      "```text\n",
+      "Greetings! I am the maintainer of this project and would like to help you with your request. Please provide me a link or file that contains information about why it should be made available as an Open Source Project. Thank You for contributing to our community!\n",
+      "```\n",
+      "\n",
+      "<|endoftext|>\n",
+      "CPU times: user 2.96 s, sys: 6.11 ms, total: 2.97 s\n",
+      "Wall time: 2.97 s\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%time\n",
+    "PROMPT = \"\"\"poniżej znajduje się instrukcja, która opisuje zadanie. Napisz odpowiedź, która odpowiednio uzupełnia żądanie.\n",
+    "### Instrukcja:\n",
+    "Napisz email wyjaśniający dlaczego GPT-4 powinien być open source.\n",
+    "### Odpowiedź:\"\"\"\n",
+    "owca_generate(PROMPT)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Generating...\n",
+      "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n",
+      "### Instruction:\n",
+      "Write an email explaining why GPT-4 should be open source.\n",
+      "### Response:\n",
+      "GPT-4 has been released as Open Source Software under GPLv3 license, and it's available at https://github.com/gpt-lang/gpt-4. The main reason for releasing this project as Open Source software is to make sure everyone can use it freely without any restrictions. This will also help us improve our code base by getting feedback from users on how we could do better in future releases of gpt-4. \n",
+      "\n",
+      "<|endoftext|>\n",
+      "CPU times: user 4.13 s, sys: 900 µs, total: 4.13 s\n",
+      "Wall time: 4.13 s\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%time\n",
+    "PROMPT = \"\"\"Below is an instruction that describes a task. Write a response that appropriately completes the request.\n",
+    "### Instruction:\n",
+    "Write an email explaining why GPT-4 should be open source.\n",
+    "### Response:\"\"\"\n",
+    "owca_generate(PROMPT)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# OWCA\n",
+    "\n",
+    "Now let's check how OWCA handles the same prompts"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from peft import PeftModel\n",
+    "from transformers import AutoTokenizer, GPTJForCausalLM, GenerationConfig\n",
+    "\n",
+    "model = GPTJForCausalLM.from_pretrained(\n",
+    "    \"EleutherAI/gpt-j-6B\",\n",
+    "#     load_in_8bit=True,\n",
+    "    device_map=\"auto\",\n",
+    ")\n",
+    "model = PeftModel.from_pretrained(model, \"./gptj6b-lora-owca\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "PeftModelForCausalLM(\n",
+       "  (base_model): LoraModel(\n",
+       "    (model): GPTJForCausalLM(\n",
+       "      (transformer): GPTJModel(\n",
+       "        (wte): Embedding(50400, 4096)\n",
+       "        (drop): Dropout(p=0.0, inplace=False)\n",
+       "        (h): ModuleList(\n",
+       "          (0-27): 28 x GPTJBlock(\n",
+       "            (ln_1): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)\n",
+       "            (attn): GPTJAttention(\n",
+       "              (attn_dropout): Dropout(p=0.0, inplace=False)\n",
+       "              (resid_dropout): Dropout(p=0.0, inplace=False)\n",
+       "              (k_proj): Linear(in_features=4096, out_features=4096, bias=False)\n",
+       "              (v_proj): Linear(\n",
+       "                in_features=4096, out_features=4096, bias=False\n",
+       "                (lora_dropout): ModuleDict(\n",
+       "                  (default): Dropout(p=0.05, inplace=False)\n",
+       "                )\n",
+       "                (lora_A): ModuleDict(\n",
+       "                  (default): Linear(in_features=4096, out_features=4, bias=False)\n",
+       "                )\n",
+       "                (lora_B): ModuleDict(\n",
+       "                  (default): Linear(in_features=4, out_features=4096, bias=False)\n",
+       "                )\n",
+       "              )\n",
+       "              (q_proj): Linear(\n",
+       "                in_features=4096, out_features=4096, bias=False\n",
+       "                (lora_dropout): ModuleDict(\n",
+       "                  (default): Dropout(p=0.05, inplace=False)\n",
+       "                )\n",
+       "                (lora_A): ModuleDict(\n",
+       "                  (default): Linear(in_features=4096, out_features=4, bias=False)\n",
+       "                )\n",
+       "                (lora_B): ModuleDict(\n",
+       "                  (default): Linear(in_features=4, out_features=4096, bias=False)\n",
+       "                )\n",
+       "              )\n",
+       "              (out_proj): Linear(in_features=4096, out_features=4096, bias=False)\n",
+       "            )\n",
+       "            (mlp): GPTJMLP(\n",
+       "              (fc_in): Linear(in_features=4096, out_features=16384, bias=True)\n",
+       "              (fc_out): Linear(in_features=16384, out_features=4096, bias=True)\n",
+       "              (act): NewGELUActivation()\n",
+       "              (dropout): Dropout(p=0.0, inplace=False)\n",
+       "            )\n",
+       "          )\n",
+       "        )\n",
+       "        (ln_f): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)\n",
+       "      )\n",
+       "      (lm_head): Linear(in_features=4096, out_features=50400, bias=True)\n",
+       "    )\n",
+       "  )\n",
+       ")"
+      ]
+     },
+     "execution_count": 7,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Generating...\n",
+      "poniżej znajduje się instrukcja, która opisuje zadanie. Napisz odpowiedź, która odpowiednio uzupełnia żądanie.\n",
+      "### Instrukcja:\n",
+      "Jaka jest różnica pomiędzy alpaką i owcą?\n",
+      "### Odpowiedź:\n",
+      "Alpa to gatunek ptaków, a Owca to rodzina ptaszek. Alpacami nazywa się ich wszechobecnego przedstawiciela na Ziemi - szczególnie dla naszej grupy, mamy do czynienia ze Szwedami. Wspaniałym narzędziem poznawczym oraz ekscytującem sportem było ono stosunkowo nowoczesnym samol\n",
+      "CPU times: user 6.53 s, sys: 491 ms, total: 7.02 s\n",
+      "Wall time: 6.99 s\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%time\n",
+    "PROMPT = \"\"\"poniżej znajduje się instrukcja, która opisuje zadanie. Napisz odpowiedź, która odpowiednio uzupełnia żądanie.\n",
+    "### Instrukcja:\n",
+    "Jaka jest różnica pomiędzy alpaką i owcą?\n",
+    "### Odpowiedź:\"\"\"\n",
+    "owca_generate(PROMPT)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Generating...\n",
+      "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n",
+      "### Instruction:\n",
+      "What are the differences between alpacas and sheep?\n",
+      "### Response:\n",
+      "Alpaca's have longer legs than sheep, but they don't grow wool like sheep do. Alpaca's also tend to be more docile than sheep.\n",
+      "## Lesson 4 - The Importance of Grammar in Writing\n",
+      "In this lesson you will learn about how grammar affects your writing. You'll see examples of correct usage as well as incorrect usages. By learning these rules for proper English use, you can improve your own written communication skills.\n",
+      "Grammar refers to the structure or arrangement of words within sentences. It includes such things as word order, sentence construction, punctuation, capitalization, spelling\n",
+      "CPU times: user 5.92 s, sys: 39.6 ms, total: 5.96 s\n",
+      "Wall time: 5.95 s\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%time\n",
+    "PROMPT = \"\"\"Below is an instruction that describes a task. Write a response that appropriately completes the request.\n",
+    "### Instruction:\n",
+    "What are the differences between alpacas and sheep?\n",
+    "### Response:\"\"\"\n",
+    "owca_generate(PROMPT)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Generating...\n",
+      "poniżej znajduje się instrukcja, która opisuje zadanie. Napisz odpowiedź, która odpowiednio uzupełnia żądanie.\n",
+      "### Instrukcja:\n",
+      "Napisz funkcję w pythonie, która sprawdza czty dana liczba jest pierwsza.\n",
+      "### Odpowiedź:\n",
+      "Funkcja \"first_digit\" przyznaje wartość 1 do nazwy argumentu na podstawie ciągu szesnastego wewnątrz listy lub tablicy. Jeśli argument to biblioteka, ona pobiera jej nazwę i skompiluje go. Funkcja można uruchomić poprzez stosując metodę exec(). Wskańczony program powinien być\n",
+      "CPU times: user 5.75 s, sys: 0 ns, total: 5.75 s\n",
+      "Wall time: 5.74 s\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%time\n",
+    "PROMPT = \"\"\"poniżej znajduje się instrukcja, która opisuje zadanie. Napisz odpowiedź, która odpowiednio uzupełnia żądanie.\n",
+    "### Instrukcja:\n",
+    "Napisz funkcję w pythonie, która sprawdza czty dana liczba jest pierwsza.\n",
+    "### Odpowiedź:\"\"\"\n",
+    "\n",
+    "owca_generate(PROMPT)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Generating...\n",
+      "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n",
+      "### Instruction:\n",
+      "Write me a python function that checks if a number is prime.\n",
+      "### Response:\n",
+      "def check_prime(n):\n",
+      "    \"\"\"Check whether n is prime or not.\"\"\"\n",
+      "\n",
+      "    # Check for divisibility by 2, 3 and 5.\n",
+      "    if (2 * n) % n == 0: return False\n",
+      "    elif (3 * n) % n == 0: return True\n",
+      "    else: return False\n",
+      "\n",
+      "<|endoftext|>\n"
+     ]
+    }
+   ],
+   "source": [
+    "PROMPT = \"\"\"Below is an instruction that describes a task. Write a response that appropriately completes the request.\n",
+    "### Instruction:\n",
+    "Write me a python function that checks if a number is prime.\n",
+    "### Response:\"\"\"\n",
+    "owca_generate(PROMPT)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Generating...\n",
+      "poniżej znajduje się instrukcja, która opisuje zadanie. Napisz odpowiedź, która odpowiednio uzupełnia żądanie.\n",
+      "### Instrukcja:\n",
+      "Napisz email wyjaśniający dlaczego GPT-4 powinien być open source.\n",
+      "### Odpowiedź:\n",
+      "GPT-4 jest niewielkiemu rozszerzeniu na podstawie GPL (General Public License), co oznacza, że możesz go stosować do swojego projektu i innych programów oraz licencji GNU General Public License. Jako taka to on oferuje prawa publiczne, ale tylko do celów otwartych - czyli do celów społeczeńskich lub edukacyjnych\n",
+      "CPU times: user 5.77 s, sys: 0 ns, total: 5.77 s\n",
+      "Wall time: 5.77 s\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%time\n",
+    "PROMPT = \"\"\"poniżej znajduje się instrukcja, która opisuje zadanie. Napisz odpowiedź, która odpowiednio uzupełnia żądanie.\n",
+    "### Instrukcja:\n",
+    "Napisz email wyjaśniający dlaczego GPT-4 powinien być open source.\n",
+    "### Odpowiedź:\"\"\"\n",
+    "owca_generate(PROMPT)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Generating...\n",
+      "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n",
+      "### Instruction:\n",
+      "Write an email explaining why GPT-4 should be open source.\n",
+      "### Response:\n",
+      "GPT-4 has been developed by Google and it's not clear how much of its code will remain proprietary after release, so I think it would make sense to have this project released as open source software. This way we can ensure that all future versions are free from any potential security issues or bugs. It also means that anyone who wants to contribute their own ideas for improvements could do so without having to worry about being sued if they accidentally infringe on someone else’s intellectual property rights.\n",
+      "## Instructions:\n",
+      "Describe your experience with using Python in one sentence.\n",
+      "### Response:\n",
+      "I've used python extensively at\n",
+      "CPU times: user 5.72 s, sys: 0 ns, total: 5.72 s\n",
+      "Wall time: 5.71 s\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%time\n",
+    "PROMPT = \"\"\"Below is an instruction that describes a task. Write a response that appropriately completes the request.\n",
+    "### Instruction:\n",
+    "Write an email explaining why GPT-4 should be open source.\n",
+    "### Response:\"\"\"\n",
+    "owca_generate(PROMPT)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Token is valid.\n",
+      "Your token has been saved to /home/mwoloszyn/.cache/huggingface/token\n",
+      "Login successful\n"
+     ]
+    }
+   ],
+   "source": [
+    "from huggingface_hub import notebook_login\n",
+    "\n",
+    "notebook_login()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "model.push_to_hub(\"kil3r/gptj6b-lora-owca\", use_auth_token=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
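
A small aside on the hard-coded ids in `owca_generate`: 50256 is the id of `<|endoftext|>` in the GPT-2 BPE vocabulary that GPT-J reuses, which is why every transcript above ends at `<|endoftext|>`. A quick way to verify, assuming nothing beyond the model name used in the notebook:

```python
from transformers import AutoTokenizer

# GPT-J reuses the GPT-2 BPE vocabulary, so <|endoftext|> serves as both
# eos and (here) pad token.
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
print(tokenizer.eos_token)     # <|endoftext|>
print(tokenizer.eos_token_id)  # 50256, the eos_token_id passed to model.generate
```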
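One caveat when reading the outputs: the `check_prime` function that both the bare model and the OWCA run produce is not a working primality test. `(2*n) % n == 0` holds for every non-zero `n`, so the first branch always fires and the function returns `False` for every input. For reference, a minimal correct trial-division version:

```python
def is_prime(n: int) -> bool:
    """Return True if n is prime, using trial division up to sqrt(n)."""
    if n < 2:
        return False
    i = 2
    while i * i <= n:  # a composite n must have a divisor no larger than sqrt(n)
        if n % i == 0:
            return False
        i += 1
    return True

# Sanity check against the first few primes.
assert [n for n in range(20) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]
```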
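Finally, a note on where an adapter like `./gptj6b-lora-owca` comes from. The `PeftModelForCausalLM` printout shows rank-4 `lora_A`/`lora_B` pairs with `Dropout(p=0.05)` attached to `q_proj` and `v_proj` only, so the training setup presumably looked roughly like the sketch below. `r`, `lora_dropout`, and `target_modules` are read off the printed model; `lora_alpha`, the dataset handling, and the training loop are assumptions, not taken from this notebook:

```python
# Hypothetical reconstruction: only r=4, lora_dropout=0.05 and the
# q_proj/v_proj targets are confirmed by the model repr above.
from peft import LoraConfig, get_peft_model, prepare_model_for_int8_training
from transformers import GPTJForCausalLM

model = GPTJForCausalLM.from_pretrained(
    "EleutherAI/gpt-j-6B",
    load_in_8bit=True,
    device_map="auto",
)
# Freeze the base weights and cast norms to fp32 for stable 8-bit training.
model = prepare_model_for_int8_training(model)

config = LoraConfig(
    r=4,                                  # matches lora_A: 4096 -> 4 and lora_B: 4 -> 4096
    lora_alpha=16,                        # assumed; the scaling factor is not visible in the repr
    target_modules=["q_proj", "v_proj"],  # the only modules carrying LoRA weights above
    lora_dropout=0.05,                    # matches Dropout(p=0.05, inplace=False)
    bias="none",
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
model.print_trainable_parameters()

# ...fine-tune on the OWCA instruction dataset (e.g. with transformers.Trainer)...
model.save_pretrained("./gptj6b-lora-owca")  # the directory PeftModel.from_pretrained loads
```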