3v324v23 committed on
Commit
61b4ea6
1 Parent(s): 51bde97

introduce project self-translation

crazy_functions/全项目切换英文.py ADDED
@@ -0,0 +1,59 @@
+ import threading
+ from predict import predict_no_ui_long_connection
+ from toolbox import CatchException, write_results_to_file
+
+
+ @CatchException
+ def 全项目切换英文(txt, top_p, temperature, chatbot, history, sys_prompt, WEB_PORT):
+     history = []    # clear history to avoid input overflow
+     # collect the files to translate
+     import time, glob, os
+     file_manifest = [f for f in glob.glob('./**/*.py', recursive=True) if ('test_project' not in f) and ('gpt_log' not in f)]
+     i_say_show_user_buffer = []
+
+     # show something right away so the UI does not look frozen
+     for index, fp in enumerate(file_manifest):
+         with open(fp, 'r', encoding='utf-8') as f:
+             file_content = f.read()
+         i_say_show_user = f'[{index}/{len(file_manifest)}] 接下来请将以下代码中包含的所有中文转化为英文,只输出代码: {os.path.abspath(fp)}'
+         i_say_show_user_buffer.append(i_say_show_user)
+         chatbot.append((i_say_show_user, "[Local Message] 等待多线程操作,中间过程不予显示."))
+         yield chatbot, history, '正常'
+
+     # worker function, one per file
+     mutable_return = [None for _ in file_manifest]
+     def thread_worker(fp, index):
+         with open(fp, 'r', encoding='utf-8') as f:
+             file_content = f.read()
+         i_say = f'接下来请将以下代码中包含的所有中文转化为英文,只输出代码,文件名是{fp},文件代码是 ```{file_content}```'
+         # ** gpt request **
+         gpt_say = predict_no_ui_long_connection(inputs=i_say, top_p=top_p, temperature=temperature, history=history, sys_prompt=sys_prompt)
+         mutable_return[index] = gpt_say
+
+     # start all worker threads at once
+     handles = [threading.Thread(target=thread_worker, args=(fp, index)) for index, fp in enumerate(file_manifest)]
+     for h in handles:
+         h.daemon = True
+         h.start()
+
+     # wait for the threads to finish, one by one
+     for index, h in enumerate(handles):
+         h.join()
+         fp = file_manifest[index]
+         gpt_say = mutable_return[index]
+         i_say_show_user = i_say_show_user_buffer[index]
+         os.makedirs('gpt_log/generated_english_version', exist_ok=True)
+         os.makedirs('gpt_log/generated_english_version/crazy_functions', exist_ok=True)
+         where_to_relocate = f'gpt_log/generated_english_version/{fp}'
+         with open(where_to_relocate, 'w+', encoding='utf-8') as f:
+             f.write(gpt_say.lstrip('```').rstrip('```'))
+         chatbot.append((i_say_show_user, f'[Local Message] 已完成{os.path.abspath(fp)}的转化,\n\n存入{os.path.abspath(where_to_relocate)}'))
+         history.append(i_say_show_user); history.append(gpt_say)
+         yield chatbot, history, '正常'
+         time.sleep(2)
+
+     # done: write the combined results to a file
+     res = write_results_to_file(history)
+     chatbot.append(("完成了吗?", res))
+     yield chatbot, history, '正常'
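The heart of the new file is a fan-out/fan-in idiom: each worker thread writes its reply into its own slot of a pre-sized list (mutable_return) instead of returning a value, and the generator then join()s the threads in list order. A minimal standalone sketch of that pattern, with a hypothetical slow_task standing in for the GPT request:

import threading

def slow_task(arg):
    # placeholder for the real per-file GPT request
    return arg.upper()

def run_in_parallel(tasks):
    # one slot per task; thread i writes only to slot i, so no lock is needed
    results = [None] * len(tasks)

    def worker(index, arg):
        results[index] = slow_task(arg)

    threads = [threading.Thread(target=worker, args=(i, t), daemon=True)
               for i, t in enumerate(tasks)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()    # wait for each worker, in submission order
    return results

print(run_in_parallel(['a.py', 'b.py']))    # ['A.PY', 'B.PY']

Because join() is called in list order, results are collected deterministically even though the requests complete in arbitrary order.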
functional_crazy.py CHANGED
@@ -7,11 +7,16 @@ def get_crazy_functionals():
     from crazy_functions.解析项目源代码 import 解析一个C项目的头文件
     from crazy_functions.解析项目源代码 import 解析一个C项目
     from crazy_functions.高级功能函数模板 import 高阶功能模板函数
+    from crazy_functions.全项目切换英文 import 全项目切换英文
 
     return {
         "[实验] 请解析并解构此项目本身": {
             "Function": 解析项目本身
         },
+        "[实验] 把此项目代码切换成全英文": {
+            "Color": "stop",    # button color
+            "Function": 全项目切换英文
+        },
         "[实验] 解析整个py项目(配合input输入框)": {
             "Color": "stop",    # button color
             "Function": 解析一个Python项目
predict.py CHANGED
@@ -25,7 +25,7 @@ def get_full_error(chunk, stream_response):
         break
     return chunk
 
-def predict_no_ui(inputs, top_p, temperature, history=[]):
+def predict_no_ui(inputs, top_p, temperature, history=[], sys_prompt=""):
     """
     Send to chatGPT and wait for the reply, completed in one shot without showing intermediate progress.
     A simplified version of the predict function.
@@ -36,7 +36,7 @@ def predict_no_ui(inputs, top_p, temperature, history=[]):
     history is the list of previous conversation turns
     (note that for both inputs and history, overly long content triggers a token-overflow error and raises ConnectionAbortedError)
     """
-    headers, payload = generate_payload(inputs, top_p, temperature, history, system_prompt="", stream=False)
+    headers, payload = generate_payload(inputs, top_p, temperature, history, system_prompt=sys_prompt, stream=False)
 
     retry = 0
     while True:
@@ -47,8 +47,8 @@ def predict_no_ui(inputs, top_p, temperature, history=[]):
         except requests.exceptions.ReadTimeout as e:
             retry += 1
             traceback.print_exc()
-            if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
             if retry > MAX_RETRY: raise TimeoutError
+            if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
 
     try:
         result = json.loads(response.text)["choices"][0]["message"]["content"]
@@ -58,6 +58,40 @@ def predict_no_ui(inputs, top_p, temperature, history=[]):
         raise ConnectionAbortedError("Json解析不合常规,可能是文本过长" + response.text)
 
 
+def predict_no_ui_long_connection(inputs, top_p, temperature, history=[], sys_prompt=""):
+    """
+    Send to chatGPT and wait for the complete reply without showing intermediate progress;
+    internally uses the stream method so a mid-stream disconnect does not go unnoticed.
+    """
+    headers, payload = generate_payload(inputs, top_p, temperature, history, system_prompt=sys_prompt, stream=True)
+
+    retry = 0
+    while True:
+        try:
+            # make a POST request to the API endpoint, stream=True
+            response = requests.post(API_URL, headers=headers, proxies=proxies,
+                                     json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
+        except requests.exceptions.ReadTimeout as e:
+            retry += 1
+            traceback.print_exc()
+            if retry > MAX_RETRY: raise TimeoutError
+            if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
+
+    stream_response = response.iter_lines()
+    result = ''
+    while True:
+        try: chunk = next(stream_response).decode()
+        except StopIteration: break
+        if len(chunk)==0: continue
+        if not chunk.startswith('data:'):
+            raise ConnectionAbortedError("OpenAI返回了错误:" + chunk)
+        delta = json.loads(chunk.lstrip('data:'))['choices'][0]["delta"]
+        if len(delta) == 0: break    # an empty delta marks the end of the stream
+        if "role" in delta: continue
+        if "content" in delta: result += delta["content"]; print(delta["content"], end='')
+        else: raise RuntimeError("意外Json结构:" + str(delta))
+    return result
+
+
 def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt='',
         stream = True, additional_fn=None):
     """
toolbox.py CHANGED
@@ -2,7 +2,7 @@ import markdown, mdtex2html, threading
 from show_math import convert as convert_math
 from functools import wraps
 
-def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]):
+def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[], sys_prompt=''):
     """
     Call the simple predict_no_ui interface while keeping a small UI heartbeat; when the conversation gets too long, it is automatically truncated by bisection
     """
@@ -17,7 +17,7 @@ def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temp
     def mt(i_say, history):
         while True:
             try:
-                mutable[0] = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature, history=history)
+                mutable[0] = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature, history=history, sys_prompt=sys_prompt)
                 break
             except ConnectionAbortedError as e:
                 if len(history) > 0:
@@ -27,7 +27,8 @@ def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temp
                 i_say = i_say[:len(i_say)//2]
                 mutable[1] = 'Warning! Input file is too long, cut into half. '
             except TimeoutError as e:
-                mutable[0] = '[Local Message] Failed with timeout'
+                mutable[0] = '[Local Message] Failed with timeout.'
+                raise TimeoutError
     # start a new thread to issue the http request
     thread_name = threading.Thread(target=mt, args=(i_say, history)); thread_name.start()
     # the original thread keeps updating the UI with a timeout countdown and waits for the new thread to finish
@@ -39,6 +40,7 @@ def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temp
         time.sleep(1)
     # take the gpt output out of the mutable list
     gpt_say = mutable[0]
+    if gpt_say == '[Local Message] Failed with timeout.': raise TimeoutError
    return gpt_say
 
 def write_results_to_file(history, file_name=None):
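The toolbox change makes the timeout path fail loudly in two places: the worker thread now re-raises TimeoutError after recording the sentinel string, and the UI thread re-raises again when it reads that sentinel out of mutable[0]. A minimal sketch of the surrounding heartbeat pattern (the 2-second worker and the TIMEOUT_SECONDS value are stand-ins for the real API call and config):

import threading, time

TIMEOUT_SECONDS = 5    # assumed config value

def request_with_countdown(prompt):
    mutable = [None]    # shared slot for the worker's result

    def worker():
        time.sleep(2)    # stand-in for the blocking predict_no_ui call
        mutable[0] = f'answer to {prompt!r}'

    t = threading.Thread(target=worker, daemon=True)
    t.start()
    waited = 0
    while t.is_alive():    # UI thread: show a countdown while the worker runs
        waited += 1
        print(f'waiting for gpt, timeout in {TIMEOUT_SECONDS - waited}s')
        time.sleep(1)
        if waited >= TIMEOUT_SECONDS:
            raise TimeoutError
    return mutable[0]

print(request_with_countdown('hi'))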