import random
import gradio as gr
import openai
import os
import re
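
# Gradio "StackTrace QA Bot": a chat app that debugs a pasted stack trace via
# self-ask style prompting (iterative follow-up questions and intermediate
# answers) against the legacy OpenAI Completions API.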

openai.api_key = os.environ.get("open_ai_key")

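# The few-shot prompt is stored as four segments; the language, stack trace,
# and question are spliced between them at query time (see
# initial_query_builder and prompt_builder).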
prompt = ['''
You are a ''', 
'''
machine learning developer, trying to debug this code:

StackTrace: 

    Traceback (most recent call last):
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/process.py”, line 258, in _bootstrap
    self.run()
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/process.py”, line 93, in run
    self._target(*self._args, **self._kwargs)
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py”, line 61, in _worker_loop
    data_queue.put((idx, samples))
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/queues.py”, line 341, in put
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/reduction.py”, line 51, in dumps
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/multiprocessing/reductions.py”, line 121, in reduce_storage
    RuntimeError: unable to open shared memory object </torch_54163_3383444026> in read-write mode at /opt/conda/conda-bld/pytorch_1525909934016/work/aten/src/TH/THAllocator.c:342

    During handling of the above exception, another exception occurred:

    Traceback (most recent call last):
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/util.py”, line 262, in _run_finalizers
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/util.py”, line 186, in call
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/shutil.py”, line 476, in rmtree
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/shutil.py”, line 474, in rmtree
    OSError: [Errno 24] Too many open files: ‘/tmp/pymp-sgew4xdn’
    Process Process-1:
    Traceback (most recent call last):
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/process.py”, line 258, in _bootstrap
    self.run()
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/process.py”, line 93, in run
    self._target(*self._args, **self._kwargs)
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py”, line 61, in _worker_loop
    data_queue.put((idx, samples))
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/queues.py”, line 341, in put
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/reduction.py”, line 51, in dumps
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/multiprocessing/reductions.py”, line 121, in reduce_storage
    RuntimeError: unable to open shared memory object </torch_54163_3383444026> in read-write mode at /opt/conda/conda-bld/pytorch_1525909934016/work/aten/src/TH/THAllocator.c:342
    Traceback (most recent call last):
    File “/home/nlpgpu3/LinoHong/FakeNewsByTitle/main.py”, line 25, in
    for mini_batch in trainloader :
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py”, line 280, in next
    idx, batch = self._get_batch()
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py”, line 259, in _get_batch
    return self.data_queue.get()
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/queues.py”, line 335, in get
    res = self._reader.recv_bytes()
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/connection.py”, line 216, in recv_bytes
    buf = self._recv_bytes(maxlength)
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/connection.py”, line 407, in _recv_bytes
    buf = self._recv(4)
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/connection.py”, line 379, in _recv
    chunk = read(handle, remaining)
    File “/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py”, line 178, in handler
    _error_if_any_worker_fails()
    RuntimeError: DataLoader worker (pid 54163) exited unexpectedly with exit code 1.

    Process finished with exit code 1

Question: Any idea how I can solve this problem?
Are follow up questions needed here: Yes
Follow up: Does your code run with less num_workers or num_workers=0? 
Intermediate Answer: It worked when I set num_workers equals to 0, but doesn’t work greater or equal to 1
Follow up: Could you try to increase the shared memory and try setting num_workers>0 again?
Intermediate Answer: It worked! Can you explain what happened here? 
So the final answer is: The error usually means that your system doesn’t provide enough shared memory for multiple workers (used via num_workers>0). Check the shared memory limitation of your system and try to increase it.

StackTrace: 

Traceback (most recent call last):
  File "main.py", line 39, in <module>
    request = create_request(page)
  File "main.py", line 15, in create_request
    url = base_url + data
TypeError: can only concatenate str (not "bytes") to str


Question: How do I fix this? 
Are follow up questions needed here: Yes
Follow up: Could you try to decode the data before passing it to the url? 
Intermediate Answer: Yes, it made the data a string and worked!
So the final answer is: You can try to decode the data before passing it to the url like this: 

data = urllib.parse.unquote(data)

StackTrace: ''',
'''
Question: ''', 
'''
Are follow up questions needed here:''',]



def extract_answer(generated):
    """Return the text after the last colon on the final line of `generated`."""
    if '\n' not in generated:
        last_line = generated
    else:
        last_line = generated.split('\n')[-1]

    if ':' not in last_line:
        after_colon = last_line
    else:
        after_colon = last_line.split(':')[-1]

    # Tidy up: drop a single leading space and a trailing period.
    if after_colon.startswith(' '):
        after_colon = after_colon[1:]
    if after_colon.endswith('.'):
        after_colon = after_colon[:-1]

    return after_colon
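
# Example (hypothetical input): extract_answer("So the final answer is: increase shared memory.")
# -> "increase shared memory"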

def extract_question(generated):
    """Return the follow-up question from the final line of `generated`."""
    if '\n' not in generated:
        last_line = generated
    else:
        last_line = generated.split('\n')[-1]

    if 'Follow up:' not in last_line:
        print('we probably should never get here...' + generated)

    if ':' not in last_line:
        after_colon = last_line
    else:
        after_colon = last_line.split(':')[-1]

    if after_colon.startswith(' '):
        after_colon = after_colon[1:]
    if not after_colon.endswith('?'):
        print('we probably should never get here...' + generated)

    return after_colon
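
# Example (hypothetical input): extract_question("Follow up: Does it run with num_workers=0?")
# -> "Does it run with num_workers=0?"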

def get_last_line(generated):
    """Return the final line of `generated`."""
    if '\n' not in generated:
        return generated
    return generated.split('\n')[-1]
  
def greenify(text):
    # ANSI escape codes for a bright green background (used for model output).
    return "\x1b[102m" + text + "\x1b[0m"

def yellowfy(text):
    # ANSI escape codes for a bright yellow background.
    return "\x1b[103m" + text + "\x1b[0m"

def call_gpt(cur_prompt, stop):
    """Query the (legacy) OpenAI Completions API, echo the reply, and return it."""
    ans = openai.Completion.create(
        model="text-davinci-002",
        max_tokens=256,
        stop=stop,
        prompt=cur_prompt,
        temperature=0.7,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )
    returned = ans['choices'][0]['text']
    print(greenify(returned), end='')
    return returned
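
# Note: the stop sequence (e.g. "\nIntermediate Answer:") makes the completion
# end just before the model would have answered its own follow-up question,
# so the user can supply that answer instead.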

def initial_query_builder(language, code, question, intermediate="\nIntermediate Answer:", followup="\nFollow up:", finalans='\nSo the final answer is:'):
    cur_prompt = prompt[0] + language + prompt[1] + code + prompt[2] + question + prompt[3]

    # Re-query until the response contains either a follow-up question or a
    # final answer; give up after 3 attempts.
    attempts = 0
    ret_text = ''
    while followup not in ret_text and finalans not in ret_text:
        attempts += 1
        ret_text = call_gpt(cur_prompt, intermediate)
        print(str(attempts) + " ret_text:", ret_text)
        if attempts == 3:
            break
    # If the model jumped straight to a final answer, re-query once with an
    # explicit chain-of-thought cue.
    if "final answer is" in ret_text:
        matches = re.findall(r".*?(?=is:)", ret_text)
        if matches:
            updated_prompt = cur_prompt + matches[0] + " is: Let's think step-by-step. "
            ret_text = call_gpt(updated_prompt, intermediate)
    return ret_text

def subsequent_query_builder(curr_prompt, external_answer, intermediate="\nIntermediate Answer:", followup="\nFollow up:", finalans='\nSo the final answer is:'):
    print("curr_prompt: ", curr_prompt)
    # Append the user's reply as the intermediate answer to the last follow-up.
    curr_prompt += intermediate + ' ' + external_answer + '.'
    # Re-query until the response contains either a follow-up question or a
    # final answer; give up after 3 attempts.
    attempts = 0
    ret_text = ''
    while followup not in ret_text and finalans not in ret_text:
        attempts += 1
        ret_text = call_gpt(curr_prompt, intermediate)
        print("subsequent query " + str(attempts) + " ret_text:", ret_text)
        if attempts == 3:
            break
    print("ret_text: ", ret_text)
    if "final answer is" in ret_text:
        matches = re.findall(r".*?(?=is:)", ret_text)
        if matches:
            updated_prompt = curr_prompt + matches[0] + " is: Let's think step-by-step. "
            ret_text = call_gpt(updated_prompt, intermediate)
    return ret_text

"""subsequent query builder: 

the way to rebuild the prompt for each subsequent call: 

1. every user response is 'intermediate answer' 
2. until you hit 'so the final answer is: ' you're good
3. 
"""

def prompt_builder(history, intermediate="\nIntermediate Answer:", followup="\nFollow up:", finalans='\nSo the final answer is:'):
    # set language
    language = history[1][0]
    # set stack trace
    stacktrace = history[0][0]
    # set question (hardcoded)
    question = "Any idea how I can solve this problem?"

    # initial prompt
    curr_prompt = prompt[0] + language + prompt[1] + stacktrace + prompt[2] + question + prompt[3]

    # replay the subsequent conversation thread
    if len(history) >= 2:  # subsequent conversations have occurred
        curr_prompt += history[1][1]  # the first response to the stacktrace prompt
        for conversation in history[2:]:
            # append the user's intermediate answer
            curr_prompt += intermediate + ' ' + conversation[0] + '.'
            # append the model's follow-up
            curr_prompt += conversation[1]
    return curr_prompt
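
# `history` is a list of (user_message, bot_response) pairs; a hypothetical run:
#   [("<stack trace>", "which language is this in? (python, java, c++, kotlin, etc.)"),
#    ("python", "<first follow-up question>"),
#    ("<intermediate answer>", "<next follow-up or the final answer>")]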

def chat(message, history):
    history = history or []
    print(len(history))
    if len(history) == 0:  # just the stacktrace
        response = "which language is this in? (python, java, c++, kotlin, etc.)"
    elif len(history) == 1:  # stacktrace + just entered the language
        stacktrace = history[0][0]
        language = message
        # set question (hardcoded for v1)
        question = "Any idea how I can solve this problem?"
        response = initial_query_builder(language, stacktrace, question)
    else:  # subsequent prompts
        curr_prompt = prompt_builder(history)
        response = subsequent_query_builder(curr_prompt, message)
        print("response: ", response)
    history.append((message, response))
    return history, history
    
def clear(arg):
    # Reset the input box with a hint for the next turn.
    return "Enter your response - feel free to elaborate further, ask questions, etc."

with gr.Blocks() as demo:
    user_state = gr.State([])
    gr.Markdown("""# StackTrace QA Bot""")
    with gr.Row():
        with gr.Column():
            inp = gr.Textbox(placeholder="enter your stacktrace here")
            btn = gr.Button("Enter message")
            output = gr.Chatbot().style(color_map=("green", "pink"))
            # Both submitting the textbox and clicking the button advance the
            # chat, then reset the input box.
            inp.submit(chat, [inp, user_state], [output, user_state])
            inp.submit(clear, inp, inp)
            btn.click(chat, [inp, user_state], [output, user_state])
            btn.click(clear, inp, inp)
    gr.Markdown("""### need help? got feedback? have thoughts? etc. ➜ Join the [Discord](https://discord.gg/KvG3azf39U)""")
    gr.Examples(examples=['''PYTORCH: ---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
/var/folders/49/9g9lxm9d3f3br8zlg2l2fmz80000gn/T/ipykernel_1349/2634282627.py in <module>
----> 1 torch.onnx.export(model, x, "output.onnx")

/opt/anaconda3/lib/python3.9/site-packages/torch/onnx/utils.py in export(model, args, f, export_params, verbose, training, input_names, output_names, operator_export_type, opset_version, do_constant_folding, dynamic_axes, keep_initializers_as_inputs, custom_opsets, export_modules_as_functions)
    502     """
    503 
--> 504     _export(
    505         model,
    506         args,

/opt/anaconda3/lib/python3.9/site-packages/torch/onnx/utils.py in _export(model, args, f, export_params, verbose, training, input_names, output_names, operator_export_type, export_type, opset_version, do_constant_folding, dynamic_axes, keep_initializers_as_inputs, fixed_batch_size, custom_opsets, add_node_names, onnx_shape_inference, export_modules_as_functions)
   1527             _validate_dynamic_axes(dynamic_axes, model, input_names, output_names)
   1528 
-> 1529             graph, params_dict, torch_out = _model_to_graph(
   1530                 model,
   1531                 args,

/opt/anaconda3/lib/python3.9/site-packages/torch/onnx/utils.py in _model_to_graph(model, args, verbose, input_names, output_names, operator_export_type, do_constant_folding, _disable_torch_constant_prop, fixed_batch_size, training, dynamic_axes)
   1113 
   1114     try:
-> 1115         graph = _optimize_graph(
   1116             graph,
   1117             operator_export_type,

/opt/anaconda3/lib/python3.9/site-packages/torch/onnx/utils.py in _optimize_graph(graph, operator_export_type, _disable_torch_constant_prop, fixed_batch_size, params_dict, dynamic_axes, input_names, module)
    580     _C._jit_pass_lint(graph)
    581     _C._jit_pass_onnx_autograd_function_process(graph)
--> 582     _C._jit_pass_lower_all_tuples(graph)
    583 
    584     # we now record some ops like ones/zeros

RuntimeError: outerNode->outputs().size() == node->inputs().size() INTERNAL ASSERT FAILED at "/Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/jit/passes/dead_code_elimination.cpp":140, please report a bug to PyTorch.''', '''RUST: error[E0382]: use of moved value: `primes`
 --> src/main.rs:9:31
  |
9 |         if vectorIsPrime(num, primes) {
  |                               ^^^^^^ value moved here, in previous iteration of loop
  |
  = note: move occurs because `primes` has type `std::vec::Vec<u64>`, which does not implement the `Copy` trait
''', "REACT: Uncaught Error: Invariant Violation: Element type is invalid: expected a string (for built-in components) or a class/function (for composite components) but got: object."],inputs=inp, cache_examples=False,)
if __name__ == "__main__":
    demo.launch(debug=True)