krrishD committed on
Commit e039790 · 1 Parent(s): 3ae9a0c

Delete untitled32.py

Files changed (1)
  1. untitled32.py +0 -243
untitled32.py DELETED
@@ -1,243 +0,0 @@
- import random
- import gradio as gr
- import openai
- import os
-
- openai.api_key = os.environ.get("open_ai_key")
-
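- # Few-shot "self-ask" style prompt, stored as four segments that the query builders
- # concatenate around the user's input as:
- #   prompt[0] + language + prompt[1] + stacktrace + prompt[2] + question + prompt[3]
- # The middle segment contains two worked examples (a DataLoader shared-memory error
- # and a size-mismatch error), each ending in "So the final answer is:".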
- prompt = ['''
- You are a ''',
- '''
- machine learning developer, trying to debug this code:
-
- StackTrace:
-
- Traceback (most recent call last):
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/process.py”, line 258, in _bootstrap
- self.run()
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/process.py”, line 93, in run
- self._target(*self._args, **self._kwargs)
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py”, line 61, in _worker_loop
- data_queue.put((idx, samples))
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/queues.py”, line 341, in put
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/reduction.py”, line 51, in dumps
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/multiprocessing/reductions.py”, line 121, in reduce_storage
- RuntimeError: unable to open shared memory object </torch_54163_3383444026> in read-write mode at /opt/conda/conda-bld/pytorch_1525909934016/work/aten/src/TH/THAllocator.c:342
-
- During handling of the above exception, another exception occurred:
-
- Traceback (most recent call last):
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/util.py”, line 262, in _run_finalizers
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/util.py”, line 186, in call
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/shutil.py”, line 476, in rmtree
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/shutil.py”, line 474, in rmtree
- OSError: [Errno 24] Too many open files: ‘/tmp/pymp-sgew4xdn’
- Process Process-1:
- Traceback (most recent call last):
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/process.py”, line 258, in _bootstrap
- self.run()
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/process.py”, line 93, in run
- self._target(*self._args, **self._kwargs)
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py”, line 61, in _worker_loop
- data_queue.put((idx, samples))
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/queues.py”, line 341, in put
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/reduction.py”, line 51, in dumps
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/multiprocessing/reductions.py”, line 121, in reduce_storage
- RuntimeError: unable to open shared memory object </torch_54163_3383444026> in read-write mode at /opt/conda/conda-bld/pytorch_1525909934016/work/aten/src/TH/THAllocator.c:342
- Traceback (most recent call last):
- File “/home/nlpgpu3/LinoHong/FakeNewsByTitle/main.py”, line 25, in
- for mini_batch in trainloader :
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py”, line 280, in next
- idx, batch = self._get_batch()
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py”, line 259, in _get_batch
- return self.data_queue.get()
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/queues.py”, line 335, in get
- res = self._reader.recv_bytes()
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/connection.py”, line 216, in recv_bytes
- buf = self._recv_bytes(maxlength)
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/connection.py”, line 407, in _recv_bytes
- buf = self._recv(4)
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/connection.py”, line 379, in _recv
- chunk = read(handle, remaining)
- File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py”, line 178, in handler
- _error_if_any_worker_fails()
- RuntimeError: DataLoader worker (pid 54163) exited unexpectedly with exit code 1.
-
- Process finished with exit code 1
-
- Question: Any idea how I can solve this problem?
- Are follow up questions needed here: Yes
- Follow up: Does your code run with less num_workers or num_workers=0?
- Intermediate Answer: It worked when I set num_workers equals to 0, but doesn’t work greater or equal to 1
- Follow up: Could you try to increase the shared memory and try setting num_workers>0 again?
- Intermediate Answer: It worked! Can you explain what happened here?
- So the final answer is: The error usually means that your system doesn’t provide enough shared memory for multiple workers (used via num_workers>0). Check the shared memory limitation of your system and try to increase it.
-
-
- StackTrace:
-
- RuntimeError: size mismatch (got input: [100000], target: [1000])
-
- Question: Any idea how I can solve this problem?
- Are follow up questions needed here: No
- So the final answer is: I don’t know which line of code creates the shape mismatch but would guess it’s raised in the loss calculation: loss = criterion(output.view(-1), batch['input_ids'].view(-1).to(device)). Print the shapes of both tensors and make sure they are expected in the used criterion. PS you can post code snippets by wrapping them into three backticks ```, which would make your code easier to read.
-
- StackTrace: ''',
- '''
- Question: ''',
- '''
- Are follow up questions needed here:''',]
-
-
-
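- # Completion-parsing helpers: extract_answer and extract_question return the text
- # after the final colon of the generation (stripping a leading space), and
- # get_last_line returns just the last line of the generation.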
- def extract_answer(generated):
-     if '\n' not in generated:
-         last_line = generated
-     else:
-         last_line = generated.split('\n')[-1]
-
-     if ':' not in last_line:
-         after_colon = last_line
-     else:
-         after_colon = generated.split(':')[-1]
-
-     if ' ' == after_colon[0]:
-         after_colon = after_colon[1:]
-     if '.' == after_colon[-1]:
-         after_colon = after_colon[:-1]
-
-     return after_colon
-
- def extract_question(generated):
-     if '\n' not in generated:
-         last_line = generated
-     else:
-         last_line = generated.split('\n')[-1]
-
-     if 'Follow up:' not in last_line:
-         print('we probably should never get here...' + generated)
-
-     if ':' not in last_line:
-         after_colon = last_line
-     else:
-         after_colon = generated.split(':')[-1]
-
-     if ' ' == after_colon[0]:
-         after_colon = after_colon[1:]
-     if '?' != after_colon[-1]:
-         print('we probably should never get here...' + generated)
-
-     return after_colon
-
- def get_last_line(generated):
-     if '\n' not in generated:
-         last_line = generated
-     else:
-         last_line = generated.split('\n')[-1]
-
-
-     return last_line
-
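- # ANSI-escape helpers used to colour console logging of model vs. user turns
- # (note: "\x1b[106m" is actually a bright-cyan background, despite the name yellowfy).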
- def greenify(input):
-     return "\x1b[102m" + input + "\x1b[0m"
-
- def yellowfy(input):
-     return "\x1b[106m" + input + "\x1b[0m"
-
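- # Single completion call: sends the assembled prompt to text-davinci-002 and stops
- # generation at the supplied stop string (the callers pass "\nIntermediate Answer:"),
- # so the model halts right after producing its follow-up question.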
- def call_gpt(cur_prompt, stop):
-     ans = openai.Completion.create(
-         model="text-davinci-002",
-         max_tokens=256,
-         stop=stop,
-         prompt=cur_prompt,
-         temperature=0.7,
-         top_p=1,
-         frequency_penalty=0,
-         presence_penalty=0
-     )
-     returned = ans['choices'][0]['text']
-     print(greenify(returned), end='')
-     return returned
-
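- # First model call: builds the full prompt from the user's language, stack trace,
- # and (hardcoded) question, then requests a completion that ends at the next
- # "Intermediate Answer:" marker.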
- def initial_query_builder(language, code, question, intermediate = "\nIntermediate Answer:", followup = "\nFollow up:", finalans= '\nSo the final answer is:'):
-     cur_prompt = prompt[0] + language + prompt[1] + code + prompt[2] + question + prompt[3]
-
-     # print("prompt: ", cur_prompt, end ='')
-
-     ret_text = call_gpt(cur_prompt, intermediate)
-     print("ret_text: ", ret_text)
-     print("get_last_line(ret_text): ", get_last_line(ret_text))
-     return ret_text
-
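- # Later model calls: append the user's reply as the "Intermediate Answer:" and
- # re-query, again stopping before the model writes the next intermediate answer.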
- def subsequent_query_builder(curr_prompt, external_answer, intermediate = "\nIntermediate Answer:", followup = "\nFollow up:", finalans= '\nSo the final answer is:'):
-     curr_prompt += intermediate + ' ' + external_answer + '.'
-     print(intermediate + ' ' + yellowfy(external_answer) + '.', end='')
-     ret_text = call_gpt(curr_prompt, intermediate)
-     return ret_text
-
- """subsequent query builder:
179
-
180
- the way to rebuild the prompt for each subsequent call:
181
-
182
- 1. every user response is 'intermediate answer'
183
- 2. until you hit 'so the final answer is: ' you're good
184
- 3.
185
- """
186
-
187
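- # Rebuilds the whole prompt from the Gradio chat history: history[0] holds the
- # stack trace, history[1] the language plus the first model response, and every
- # later pair is (user intermediate answer, model follow-up).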
- def prompt_builder(history, intermediate = "\nIntermediate Answer:", followup = "\nFollow up:", finalans= '\nSo the final answer is:'):
-     # set language
-     language = history[1][0]
-     # set stack trace
-     stacktrace = history[0][0]
-     # set question (hardcoded)
-     question = "Any idea how I can solve this problem?"
-
-     # initial prompt
-     curr_prompt = prompt[0] + language + prompt[1] + stacktrace + prompt[2] + question + prompt[3]
-
-     # set subsequent conversation thread
-     if len(history) > 2:  # subsequent conversations have occurred
-         curr_prompt += history[1][1]  # get the first response to the stacktrace prompt
-         for conversation in history[2:]:
-             # grab intermediate answer
-             curr_prompt += intermediate + ' ' + conversation[0] + '.'
-             # grab the follow up
-             curr_prompt += conversation[1]
-     return curr_prompt
-
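- # Gradio callback: turn 0 asks for the language, turn 1 issues the initial query,
- # and every later turn rebuilds the prompt from history and continues the self-ask
- # exchange with the user's message as the intermediate answer.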
- def chat(message, history):
-     history = history or []
-     print(len(history))
-     if len(history) == 0:  # just the stacktrace
-         response = "which language is this in? (python, java, c++, kotlin, etc.)"
-     elif len(history) == 1:  # stacktrace + just entered the language
-         # get stacktrace
-         stacktrace = history[0][0]
-         # get language
-         language = message
-         # set question (hardcoded for v1)
-         question = "Any idea how I can solve this problem?"
-         response = initial_query_builder(language, stacktrace, question)
-     else:  # subsequent prompts
-         # get stacktrace
-         stacktrace = history[0][0]
-         # get language
-         language = history[1][0]
-         # set question (hardcoded for v1)
-         question = "Any idea how I can solve this problem?"
-         curr_prompt = prompt_builder(history)
-         response = subsequent_query_builder(curr_prompt, message)
-     # response = query_builder(language, stacktrace, question)
-     print("response: ", response)
-     history.append((message, response))
-     return history, history
-
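- # Wire the chat function into a simple Gradio interface; conversation state is
- # threaded through the "state" input/output so history persists across turns.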
- chatbot = gr.Chatbot().style(color_map=("green", "pink"))
- demo = gr.Interface(
-     chat,
-     [gr.Textbox(placeholder="enter your stacktrace here"), "state"],
-     [chatbot, "state"],
-     allow_flagging="never",
- )
- if __name__ == "__main__":
-     demo.launch(debug=True)