JaMe76 committed on
Commit
bc391b2
·
1 Parent(s): 403f9fb
Files changed (4) hide show
  1. app.py +135 -0
  2. packages.txt +1 -0
  3. requirements.txt +4 -0
  4. sample_2.png +0 -0
app.py ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# --- Runtime environment setup for the Hugging Face Space -------------------
# NOTE(review): packages are installed at process start via os.system so that
# the pinned detectron2 / gradio versions are in place BEFORE the imports
# below run. Do not reorder: the imports depend on these installs.
import os

# detectron2 wheel built for CUDA 10.2 / torch 1.9 (matches requirements.txt pins)
os.system('pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.9/index.html')
# AWS credentials pulled from Space secrets; consumed elsewhere — TODO confirm where these kwargs are used
credentials_kwargs={"aws_access_key_id": os.environ["ACCESS_KEY"],"aws_secret_access_key": os.environ["SECRET_KEY"]}

# work around: https://discuss.huggingface.co/t/how-to-install-a-specific-version-of-gradio-in-spaces/13552
os.system("pip uninstall -y gradio")
os.system("pip install gradio==3.4.1")
# DD_ADDONS holds a shell command kept out of the public repo (presumably a
# pip install of the private dd_addons package) — TODO confirm
os.system(os.environ["DD_ADDONS"])

import time
from os import getcwd, path

import deepdoctection as dd
from deepdoctection.dataflow.serialize import DataFromList
from deepdoctection.utils.settings import get_type

from dd_addons.analyzer.loader import get_loader
from dd_addons.extern.guidance import TOKEN_DEFAULT_INSTRUCTION
from dd_addons.utils.settings import register_llm_token_tag, register_string_categories_from_list
from dd_addons.extern.openai import OpenAiLmmTokenClassifier

import gradio as gr

# Build the deepdoctection analyzer pipeline once at startup (shared by all requests)
analyzer = get_loader(reset_config_file=True)

# Root gradio container; the page layout is declared in the `with demo:` block below
demo = gr.Blocks(css="scrollbar.css")
27
+
28
+
29
def process_analyzer(openai_api_key, categories_str, instruction_str, img, pdf, max_datapoints):
    """Run the document analyzer with a GPT-based token classifier on an image or a PDF.

    :param openai_api_key: OpenAI API key forwarded to the GPT token classifier
    :param categories_str: comma separated entity (token class) names, snake_case
    :param instruction_str: optional custom prompt; empty string falls back to the default
    :param img: numpy RGB image from the gradio Image component, or None
    :param pdf: uploaded file object from the gradio File component, or None
    :param max_datapoints: maximum number of PDF pages to process
    :return: tuple of (list of visualized page images, dict mapping "page_{idx}"
             to the extracted tokens of that page)
    :raises ValueError: if neither an image nor a PDF is supplied
    """
    # Register the user supplied entity names as a custom object-type enum so the
    # pipeline can tag words with them.
    categories_list = categories_str.split(",")
    register_string_categories_from_list(categories_list, "custom_token_classes")
    custom_token_class = dd.object_types_registry.get("custom_token_classes")
    # Materialize once instead of building the same comprehension twice.
    token_classes = list(custom_token_class)
    print(token_classes)  # debug output of the registered token classes
    register_llm_token_tag(token_classes)
    # deepdoctection expects a 1-based string-index -> type mapping
    categories = {
        str(idx + 1): get_type(val) for idx, val in enumerate(categories_list)
    }

    gpt_token_classifier = OpenAiLmmTokenClassifier(
        model_name="gpt-3.5-turbo",
        categories=categories,
        api_key=openai_api_key,
        instruction=instruction_str if instruction_str else None,
    )
    # Swap the language model of pipeline component 8 (assumed to be the LM token
    # classification stage — TODO confirm against the loader's pipeline layout).
    analyzer.pipe_component_list[8].language_model = gpt_token_classifier

    if img is not None:
        # Wrap the raw array in a dd.Image; gradio delivers RGB while dd works
        # with BGR, hence the channel reversal. Timestamp-derived name keeps
        # concurrent requests from colliding.
        image = dd.Image(file_name=str(time.time()).replace(".", "") + ".png", location="")
        image.image = img[:, :, ::-1]
        df = DataFromList(lst=[image])
        df = analyzer.analyze(dataset_dataflow=df)
    elif pdf:
        df = analyzer.analyze(path=pdf.name, max_datapoints=max_datapoints)
    else:
        # Previously a bare ValueError; give the UI a readable error message.
        raise ValueError("No input given. Please upload an image or a PDF file.")

    df.reset_state()

    json_out = {}
    dpts = []
    for idx, dp in enumerate(df):
        dpts.append(dp)
        json_out[f"page_{idx}"] = dp.get_token()

    # Visualize only the words together with their non-default token classes.
    return [
        dp.viz(
            show_cells=False,
            show_layouts=False,
            show_tables=False,
            show_words=True,
            show_token_class=True,
            ignore_default_token_class=True,
        )
        for dp in dpts
    ], json_out
69
+
70
+
71
# --- Page layout: inputs (image/PDF, API key, entities, prompt), run button,
# --- outputs (JSON, gallery, HTML table), and the click wiring.
with demo:
    with gr.Box():
        gr.Markdown("<h1><center>Document AI GPT</center></h1>")
        gr.Markdown("<h2 ><center>Zero or few-shot Entity Extraction powered by ChatGPT and <strong>deep</strong>doctection </center></h2>"
                    "<center>This pipeline consists of a stack of models powered for layout analysis and table recognition "
                    "to prepare a prompt for ChatGPT. </center>"
                    "<center>Be aware! The Space is still very fragile.</center><br />")
    with gr.Box():
        gr.Markdown("<h2><center>Upload a document and choose setting</center></h2>")
        with gr.Row():
            # Left column: document upload (image or PDF) and API key
            with gr.Column():
                with gr.Tab("Image upload"):
                    with gr.Column():
                        inputs = gr.Image(type='numpy', label="Original Image")
                with gr.Tab("PDF upload *"):
                    with gr.Column():
                        inputs_pdf = gr.File(label="PDF")
                        gr.Markdown("<sup>* If an image is cached in tab, remove it first</sup>")
                with gr.Box():
                    gr.Examples(
                        examples=[path.join(getcwd(), "sample_2.png")],
                        inputs = inputs)
                with gr.Box():
                    gr.Markdown("Enter your OpenAI API Key* ")
                    user_token = gr.Textbox(value='', placeholder="OpenAI API Key", type="password", show_label=False)
                    # Fix: the two adjacent string literals previously concatenated
                    # to "deactivate theAPI key" (missing space).
                    gr.Markdown("<sup>* Your API key will not be saved. However, it is always recommended to deactivate the "
                                "API key once it is entered into an unknown source</sup>")
            # Right column: entity list, optional prompt, page limit
            with gr.Column():
                with gr.Box():
                    gr.Markdown(
                        # Fix: typo "seperated" -> "separated"
                        "Enter a list of comma separated entities. Use a snake case style. Avoid special characters. "
                        "Best way is to only use `a-z` and `_`")
                    categories = gr.Textbox(value='', placeholder="mitarbeiter_anzahl", show_label=False)
                with gr.Box():
                    gr.Markdown("Optional: Enter a prompt for additional guidance. Will use the placeholder as fallback")
                    instruction = gr.Textbox(value='', placeholder=TOKEN_DEFAULT_INSTRUCTION, show_label=False)
                with gr.Row():
                    max_imgs = gr.Slider(1, 3, value=1, step=1, label="Number of pages in multi page PDF",
                                         info="Will stop after 3 pages")

    with gr.Row():
        btn = gr.Button("Run model", variant="primary")

    with gr.Box():
        gr.Markdown("<h2><center>Outputs</center></h2>")
        with gr.Row():
            with gr.Column():
                with gr.Box():
                    gr.Markdown("<center><strong>JSON</strong></center>")
                    json = gr.JSON()
            with gr.Column():
                with gr.Box():
                    gr.Markdown("<center><strong>Layout detection</strong></center>")
                    gallery = gr.Gallery(
                        label="Output images", show_label=False, elem_id="gallery"
                    ).style(grid=2)
        with gr.Row():
            with gr.Box():
                # NOTE(review): placeholder only — no output of process_analyzer
                # is wired to this HTML component.
                gr.Markdown("<center><strong>Table</strong></center>")
                html = gr.HTML()

    # Wire the run button: all user inputs in, gallery images + token JSON out.
    btn.click(fn=process_analyzer, inputs=[user_token, categories, instruction, inputs, inputs_pdf, max_imgs],
              outputs=[gallery, json])

demo.launch()
packages.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ poppler-utils
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Pillow==9.5.0
2
+ torch==1.12.0
3
+ torchvision==0.13.0
4
+ git+https://github.com/deepdoctection/deepdoctection#egg=deepdoctection[hf]
sample_2.png ADDED