3v324v23 committed
Commit 167be41
1 Parent(s): a71edee

Consolidate the pdfminer logic into a single file

crazy_functions/批量总结PDF文档.py CHANGED
@@ -11,7 +11,6 @@ def 解析PDF(file_manifest, project_folder, top_p, temperature, chatbot, histor
             file_content = ""
             for page in doc:
                 file_content += page.get_text()
-            file_content = file_content.encode('gbk', 'ignore').decode('gbk')
             print(file_content)
 
         prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
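For context, the line removed above was a lossy character filter: round-tripping a string through GBK with errors='ignore' silently drops every character that GBK cannot represent. A minimal illustration (not code from this commit):

    # Characters outside the GBK repertoire are discarded by the encode/decode round trip.
    s = "alpha 𝛼 与 你好"
    print(s.encode('gbk', 'ignore').decode('gbk'))  # the math symbol 𝛼 disappears; the Chinese text survives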
crazy_functions/批量总结PDF文档pdfminer.py ADDED
@@ -0,0 +1,151 @@
+from predict import predict_no_ui
+from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down
+
+fast_debug = False
+
+def readPdf(pdfPath):
+    """
+    读取pdf文件,返回文本内容
+    """
+    import pdfminer
+    from pdfminer.pdfparser import PDFParser
+    from pdfminer.pdfdocument import PDFDocument
+    from pdfminer.pdfpage import PDFPage, PDFTextExtractionNotAllowed
+    from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
+    from pdfminer.pdfdevice import PDFDevice
+    from pdfminer.layout import LAParams
+    from pdfminer.converter import PDFPageAggregator
+
+    fp = open(pdfPath, 'rb')
+
+    # Create a PDF parser object associated with the file object
+    parser = PDFParser(fp)
+
+    # Create a PDF document object that stores the document structure.
+    # Password for initialization as 2nd parameter
+    document = PDFDocument(parser)
+    # Check if the document allows text extraction. If not, abort.
+    if not document.is_extractable:
+        raise PDFTextExtractionNotAllowed
+
+    # Create a PDF resource manager object that stores shared resources.
+    rsrcmgr = PDFResourceManager()
+
+    # Create a PDF device object.
+    # device = PDFDevice(rsrcmgr)
+
+    # BEGIN LAYOUT ANALYSIS.
+    # Set parameters for analysis.
+    laparams = LAParams(
+        char_margin=10.0,
+        line_margin=0.2,
+        boxes_flow=0.2,
+        all_texts=False,
+    )
+    # Create a PDF page aggregator object.
+    device = PDFPageAggregator(rsrcmgr, laparams=laparams)
+    # Create a PDF interpreter object.
+    interpreter = PDFPageInterpreter(rsrcmgr, device)
+
+    # loop over all pages in the document
+    outTextList = []
+    for page in PDFPage.create_pages(document):
+        # read the page into a layout object
+        interpreter.process_page(page)
+        layout = device.get_result()
+        for obj in layout._objs:
+            if isinstance(obj, pdfminer.layout.LTTextBoxHorizontal):
+                # print(obj.get_text())
+                outTextList.append(obj.get_text())
+
+    return outTextList
+
+
+def 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt):
+    import time, glob, os
+    from bs4 import BeautifulSoup
+    print('begin analysis on:', file_manifest)
+    for index, fp in enumerate(file_manifest):
+        if ".tex" in fp:
+            with open(fp, 'r', encoding='utf-8') as f:
+                file_content = f.read()
+        if ".pdf" in fp.lower():
+            file_content = readPdf(fp)
+            file_content = BeautifulSoup(''.join(file_content), features="lxml").body.text.encode('gbk', 'ignore').decode('gbk')
+
+        prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
+        i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
+        i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
+        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
+        print('[1] yield chatbot, history')
+        yield chatbot, history, '正常'
+
+        if not fast_debug:
+            msg = '正常'
+            # ** gpt request **
+            gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时
+
+            print('[2] end gpt req')
+            chatbot[-1] = (i_say_show_user, gpt_say)
+            history.append(i_say_show_user); history.append(gpt_say)
+            print('[3] yield chatbot, history')
+            yield chatbot, history, msg
+            print('[4] next')
+        if not fast_debug: time.sleep(2)
+
+    all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
+    i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
+    chatbot.append((i_say, "[Local Message] waiting gpt response."))
+    yield chatbot, history, '正常'
+
+    if not fast_debug:
+        msg = '正常'
+        # ** gpt request **
+        gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history) # 带超时倒计时
+
+        chatbot[-1] = (i_say, gpt_say)
+        history.append(i_say); history.append(gpt_say)
+        yield chatbot, history, msg
+        res = write_results_to_file(history)
+        chatbot.append(("完成了吗?", res))
+        yield chatbot, history, msg
+
+
+
+@CatchException
+def 批量总结PDF文档pdfminer(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+    history = [] # 清空历史,以免输入溢出
+    import glob, os
+
+    # 基本信息:功能、贡献者
+    chatbot.append([
+        "函数插件功能?",
+        "批量总结PDF文档,此版本使用pdfminer插件,带token约简功能。函数插件贡献者: Euclid-Jie。"])
+    yield chatbot, history, '正常'
+
+    # 尝试导入依赖,如果缺少依赖,则给出安装建议
+    try:
+        import pdfminer, bs4
+    except:
+        report_execption(chatbot, history,
+            a = f"解析项目: {txt}",
+            b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。")
+        yield chatbot, history, '正常'
+        return
+    if os.path.exists(txt):
+        project_folder = txt
+    else:
+        if txt == "": txt = '空空如也的输入栏'
+        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        yield chatbot, history, '正常'
+        return
+    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \
+                    [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] # + \
+                    # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
+                    # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
+    if len(file_manifest) == 0:
+        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}")
+        yield chatbot, history, '正常'
+        return
+    yield from 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
+
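For orientation, the readPdf helper added above can be exercised on its own. The snippet below is a minimal sketch, not part of the commit: it assumes pdfminer.six is installed and that the repository root (with its own dependencies) is on the Python path; "paper.pdf" is a placeholder path.

    # Hypothetical standalone use of the new helper; "paper.pdf" is a placeholder.
    from crazy_functions.批量总结PDF文档pdfminer import readPdf

    text_boxes = readPdf("paper.pdf")   # one string per horizontal text box, as collected in outTextList
    full_text = ''.join(text_boxes)     # flattened the same way 解析Paper joins it before summarization
    print(full_text[:500])              # preview the first 500 characters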
crazy_functions/读文章写摘要.py CHANGED
@@ -1,19 +1,14 @@
 from predict import predict_no_ui
-from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down, readPdf
+from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down
 fast_debug = False
-from bs4 import BeautifulSoup
 
 
 def 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt):
     import time, glob, os
     print('begin analysis on:', file_manifest)
     for index, fp in enumerate(file_manifest):
-        if ".tex" in fp:
-            with open(fp, 'r', encoding='utf-8') as f:
-                file_content = f.read()
-        if ".pdf" in fp.lower():
-            file_content = readPdf(fp)
-            file_content = BeautifulSoup(''.join(file_content), features="lxml").body.text.encode('gbk', 'ignore').decode('gbk')
+        with open(fp, 'r', encoding='utf-8') as f:
+            file_content = f.read()
 
         prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
         i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
@@ -22,7 +17,7 @@ def 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, hist
         print('[1] yield chatbot, history')
         yield chatbot, history, '正常'
 
-        if not fast_debug:
+        if not fast_debug:
             msg = '正常'
             # ** gpt request **
             gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时
@@ -40,7 +35,7 @@ def 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, hist
     chatbot.append((i_say, "[Local Message] waiting gpt response."))
     yield chatbot, history, '正常'
 
-    if not fast_debug:
+    if not fast_debug:
         msg = '正常'
         # ** gpt request **
         gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history) # 带超时倒计时
@@ -65,12 +60,11 @@ def 读文章写摘要(txt, top_p, temperature, chatbot, history, systemPromptTx
         report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield chatbot, history, '正常'
         return
-    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \
-                    [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] # + \
+    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] # + \
                     # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
                     # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}")
+        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
         yield chatbot, history, '正常'
         return
     yield from 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
functional_crazy.py CHANGED
@@ -30,7 +30,7 @@ def get_crazy_functionals():
             "Color": "stop",    # 按钮颜色
             "Function": 解析一个C项目
         },
-        "读tex or pdf论文写摘要": {
+        "读tex论文写摘要": {
             "Color": "stop",    # 按钮颜色
             "Function": 读文章写摘要
         },
@@ -55,7 +55,13 @@ def get_crazy_functionals():
             "Function": 批量总结PDF文档
         },
     })
-
+    from crazy_functions.批量总结PDF文档pdfminer import 批量总结PDF文档pdfminer
+    function_plugins.update({
+        "[仅供开发调试] 批量总结PDF文档pdfminer": {
+            "Color": "stop",
+            "Function": 批量总结PDF文档pdfminer
+        },
+    })
     # VisibleLevel=2 尚未充分测试的函数插件,放在这里
     if UserVisibleLevel >= 2:
         function_plugins.update({
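The hunk above wires the new plugin into the same registry dictionary the existing buttons use, keyed by the label shown in the UI. A rough sketch of how such an entry is looked up (the dictionary layout comes from the diff; the lookup itself is illustrative, not code from this commit, and assumes get_crazy_functionals() returns that dictionary):

    # Hypothetical lookup against the plugin registry shown in the diff.
    from functional_crazy import get_crazy_functionals

    plugins = get_crazy_functionals()
    entry = plugins["[仅供开发调试] 批量总结PDF文档pdfminer"]
    print(entry["Color"])        # button color, here "stop"
    handler = entry["Function"]  # the generator function the UI drives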
requirements.txt CHANGED
@@ -1,10 +1,5 @@
 gradio>=3.23
-requests[socks]~=2.28.2
-mdtex2html~=1.2.0
-Markdown~=3.4.3
-latex2mathml~=3.75.1
-bs4~=0.0.1
-lxml~=4.6.4
-beautifulsoup4~=4.12.0
-numpy~=1.24.2
-pdfminer.six
+requests[socks]
+mdtex2html
+Markdown
+latex2mathml
toolbox.py CHANGED
@@ -1,14 +1,6 @@
 import markdown, mdtex2html, threading, importlib, traceback
 from show_math import convert as convert_math
 from functools import wraps
-import pdfminer
-from pdfminer.pdfparser import PDFParser
-from pdfminer.pdfdocument import PDFDocument
-from pdfminer.pdfpage import PDFPage, PDFTextExtractionNotAllowed
-from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
-from pdfminer.pdfdevice import PDFDevice
-from pdfminer.layout import LAParams
-from pdfminer.converter import PDFPageAggregator
 
 def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[], sys_prompt=''):
     """
@@ -243,52 +235,4 @@ def clear_line_break(txt):
     txt = txt.replace('\n', ' ')
     txt = txt.replace('  ', ' ')
     txt = txt.replace('  ', ' ')
-    return txt
-
-def readPdf(pdfPath):
-    """
-    读取pdf文件,返回文本内容
-    """
-    fp = open(pdfPath, 'rb')
-
-    # Create a PDF parser object associated with the file object
-    parser = PDFParser(fp)
-
-    # Create a PDF document object that stores the document structure.
-    # Password for initialization as 2nd parameter
-    document = PDFDocument(parser)
-    # Check if the document allows text extraction. If not, abort.
-    if not document.is_extractable:
-        raise PDFTextExtractionNotAllowed
-
-    # Create a PDF resource manager object that stores shared resources.
-    rsrcmgr = PDFResourceManager()
-
-    # Create a PDF device object.
-    # device = PDFDevice(rsrcmgr)
-
-    # BEGIN LAYOUT ANALYSIS.
-    # Set parameters for analysis.
-    laparams = LAParams(
-        char_margin=10.0,
-        line_margin=0.2,
-        boxes_flow=0.2,
-        all_texts=False,
-    )
-    # Create a PDF page aggregator object.
-    device = PDFPageAggregator(rsrcmgr, laparams=laparams)
-    # Create a PDF interpreter object.
-    interpreter = PDFPageInterpreter(rsrcmgr, device)
-
-    # loop over all pages in the document
-    outTextList = []
-    for page in PDFPage.create_pages(document):
-        # read the page into a layout object
-        interpreter.process_page(page)
-        layout = device.get_result()
-        for obj in layout._objs:
-            if isinstance(obj, pdfminer.layout.LTTextBoxHorizontal):
-                # print(obj.get_text())
-                outTextList.append(obj.get_text())
-
-    return outTextList
+    return txt