iakarshu commited on
Commit
342ae6c
1 Parent(s): 38c6a52

Upload app.py

Browse files

Added app.py file

Files changed (1) hide show
  1. app.py +128 -0
app.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# -*- coding: utf-8 -*-
"""Gradio with DocFormer

Automatically generated by Colaboratory.

Original file is located at
https://colab.research.google.com/drive/1_XBurG-8jYF4eJJK5VoCJ2Y1v6RV9iAW
"""

## Runtime requirements (installed here because the Space has no requirements.txt
## entry for these; NOTE(review): prefer moving them into requirements.txt).
import os
os.system('pip install pyyaml==5.1')
## install PyTesseract (OCR backend used by create_features)
os.system('pip install -q pytesseract')

## Importing the functions from the DocFormer Repo
from dataset import create_features
from modeling import DocFormerEncoder, ResNetFeatureExtractor, DocFormerEmbeddings, LanguageFeatureExtractor
from transformers import BertTokenizerFast
from utils import DocFormer

## Hyperparameters
import torch

seed = 42
target_size = (500, 384)   # (height, width) the input image is resized to
max_len = 128              # max token sequence length fed to the model

## Setting some hyperparameters
device = 'cuda' if torch.cuda.is_available() else 'cpu'

config = {
    "coordinate_size": 96,  ## (768/8), 8 for each of the 8 coordinates of x, y
    "hidden_dropout_prob": 0.1,
    "hidden_size": 768,
    "image_feature_pool_shape": [7, 7, 256],
    "intermediate_ff_size_factor": 4,
    "max_2d_position_embeddings": 1024,
    "max_position_embeddings": 128,
    "max_relative_positions": 8,
    "num_attention_heads": 12,
    "num_hidden_layers": 12,
    "pad_token_id": 0,
    "shape_size": 96,
    "vocab_size": 30522,
    "layer_norm_eps": 1e-12,
}

## Defining the tokenizer
tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")

docformer = DocFormer(config)

# path_to_weights = 'drive/MyDrive/docformer_rvl_checkpoint/docformer_v1.ckpt'

url = 'https://www.kaggleusercontent.com/kf/97691030/eyJhbGciOiJkaXIiLCJlbmMiOiJBMTI4Q0JDLUhTMjU2In0..64MVC5RwlflRqMaApK2jLw.rDiswzBHQcP_1_7vsHlJgSGKLdOqVB-d4hcGP6kQs5vEAdBmOzXL6XY9MleO3A4Sk0D5RB9QGeOyp7MuBZoHJbZ0gOVz6iRsats32fz2OU1yqQt22HIigL2mD_7mrTMn5IkP7KwsxtMMEuaOPEzFh1z8JQ9eE_NFBxIkOFF_Bp62a7agvDPL3HxzmxFQ7pwrYv9ZjYNfbDeeBuHu5J_MT_wHE5hOT1FENIMhebg3Q9l7eegUZD3eCMV4QoI_HsU6NZjyZOQcpVFmU6exYz8hGnFUa_V03870N6VnTkox78td0OXH29o3bYGSWneuCc86qSHKj5I1m8KbmCenPT6zU6IQINXp8BGLVlLOHdwVAPapR4X4CqSiK3Wgt5JINfpfVjQYWo2gDkAwJI026-fdLAfJQUI6mYGd-ERpyL5ZIbdkpesTslstOtlzoNT9gp_USW6aINxO8DranfK3-PiMZ_X1zHsK1vscRpO9gohNhuOg362ijjl3FQrw48-YbYfykQFfVwQpnhYQ9Q6d5gNANfJMrzH92DlpQFBaPOLcze1BAVdM4zmVGdt8Jo-Knk1JADpNizHWmF19eDxudQO_ZCxvXWpc8v3LOh-HpA2mBB0HI1DZ4cqcMETtOwas5wzHrLqDLRJpso6BKOgz78kIZJDdj6rr7yY4QVWpVOOdNZ8.VZzPPNhnz_MUdNnc5DaZOw/models/epoch=0-step=753.ckpt'

# BUG FIX: `load_from_checkpoint` is a classmethod that RETURNS a new model
# instance — the original `docformer.load_from_checkpoint(url)` discarded the
# return value, so the randomly-initialized model was served. Capture it.
# NOTE(review): this relies on the checkpoint carrying the saved hyperparameters
# (Lightning's `save_hyperparameters`) — confirm against utils.DocFormer.
docformer = DocFormer.load_from_checkpoint(url)

# Class names for the 16 RVL-CDIP document categories, index-aligned with the
# model's output logits.
id2label = ['scientific_report',
            'resume',
            'memo',
            'file_folder',
            'specification',
            'news_article',
            'letter',
            'form',
            'budget',
            'handwritten',
            'email',
            'invoice',
            'presentation',
            'scientific_publication',
            'questionnaire',
            'advertisement']

import gradio as gr

## Taken from LayoutLMV2 space
image = gr.inputs.Image(type="pil")
label = gr.outputs.Label(num_top_classes=5)
examples = [['00093726.png'], ['00866042.png']]
title = "Interactive demo: DocFormer for Image Classification"
description = "Demo for classifying document images with DocFormer model. To use it, \
simply upload an image or use the example images below and click 'submit' to let the model predict the 5 most probable Document classes. \
Results will show up in a few seconds."
def classify_image(image):
    """Classify a document image into one of the 16 RVL-CDIP classes.

    Args:
        image: a PIL image supplied by the Gradio widget.

    Returns:
        dict mapping each label name in ``id2label`` to its softmax
        probability (a plain Python float list via ``.tolist()``), in the
        format expected by ``gr.outputs.Label``.
    """
    from torchvision import transforms

    # create_features reads from a path, so persist the uploaded image first.
    image.save('sample_img.png')
    final_encoding = create_features(
        './sample_img.png',
        tokenizer,
        add_batch_dim=True,
        target_size=target_size,
        max_seq_length=max_len,
        path_to_save=None,
        save_to_disk=False,
        apply_mask_for_mlm=False,
        extras_for_debugging=False,
        use_ocr=True,
    )

    # Truncate the token-level features to the model's maximum sequence length.
    for key in ('x_features', 'y_features', 'resized_and_aligned_bounding_boxes'):
        final_encoding[key] = final_encoding[key][:, :max_len]

    # Normalization to these mean and std (matches the preprocessing used in
    # training / image-reconstruction tutorials).
    normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    final_encoding['resized_scaled_img'] = normalize(final_encoding['resized_scaled_img'])

    output = docformer.forward(final_encoding)
    scores = output[0].softmax(axis=-1)

    # FIX: the original loop shadowed the enumerate value with a redundant
    # `score = output[i]`; a dict comprehension expresses the mapping directly.
    return {
        id2label[i]: score.detach().cpu().tolist()
        for i, score in enumerate(scores)
    }
# Assemble the demo UI and start the (queued) Gradio server with debug logging.
demo = gr.Interface(
    fn=classify_image,
    inputs=image,
    outputs=label,
    title=title,
    description=description,
    examples=examples,
    enable_queue=True,
)
demo.launch(debug=True)