acverma committed
Commit 2e055fc
1 Parent(s): 97c2262

Update app.py

Files changed (1):
  1. app.py +16 -16
app.py CHANGED
@@ -52,29 +52,29 @@ processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base",appl
  model = LayoutLMv3ForTokenClassification.from_pretrained("nielsr/layoutlmv3-finetuned-funsd")
 
  dataset = load_dataset("nielsr/funsd", split="test")
- image = Image.open(dataset[0]["image_path"]).convert("RGB")
- image = Image.open("./invoice.png")
- image.save("document1.png")
+ #image = Image.open(dataset[0]["image_path"]).convert("RGB")
+ #image = Image.open("./invoice.png")
+ #image.save("document1.png")
 
- image = Image.open(dataset[1]["image_path"]).convert("RGB")
- image = Image.open("./invoice2.png")
- image.save("document2.png")
+ #image = Image.open(dataset[1]["image_path"]).convert("RGB")
+ #image = Image.open("./invoice2.png")
+ #image.save("document2.png")
 
- image = Image.open(dataset[2]["image_path"]).convert("RGB")
- image = Image.open("./invoice3.png")
- image.save("document3.png")
+ #image = Image.open(dataset[2]["image_path"]).convert("RGB")
+ #image = Image.open("./invoice3.png")
+ #image.save("document3.png")
 
 
  #dataset = load_dataset("nielsr/funsd-layoutlmv3")
 
- #example = dataset["test"][0]
- #example["image"].save("example1.png")
+ example = dataset["test"][0]
+ example["image"].save("example1.png")
 
- #example1 = dataset["test"][1]
- #example1["image"].save("example2.png")
+ example1 = dataset["test"][1]
+ example1["image"].save("example2.png")
 
- #example2 = dataset["test"][2]
- #example2["image"].save("example3.png")
+ example2 = dataset["test"][2]
+ example2["image"].save("example3.png")
 
  #example2["image"]
 
@@ -151,7 +151,7 @@ description = "Extraction of Form or Invoice Extraction - We use Microsoft's Lay
 
  article="<b>References</b><br>[1] Y. Xu et al., “LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking.” 2022. <a href='https://arxiv.org/abs/2204.08387'>Paper Link</a><br>[2] <a href='https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LayoutLMv3'>LayoutLMv3 training and inference</a>"
 
- examples =[['document1.png'],['document1.png'],['document1.png']]
+ examples =[['example1.png'],['example1.png'],['example1.png']]
 
  css = """.output_image, .input_image {height: 600px !important}"""
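
For context, a minimal sketch of the example-preparation step this commit switches to. It assumes the nielsr/funsd-layoutlmv3 dataset referenced in the commented-out load in the diff, whose "image" column holds PIL images; note that load_dataset(..., split="test") already returns the test split, so rows are indexed directly rather than through a ["test"] subscript as in the committed lines.

```python
# Sketch only: assumes the nielsr/funsd-layoutlmv3 dataset with an "image"
# column of PIL images, per the commented-out load in the commit above.
from datasets import load_dataset

# split="test" returns the test split itself, so no ["test"] subscript is needed.
dataset = load_dataset("nielsr/funsd-layoutlmv3", split="test")

# Save the first three test documents as the Gradio example images.
for i in range(3):
    dataset[i]["image"].save(f"example{i + 1}.png")
```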
 
 
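A hypothetical sketch of how the updated examples list and the css string are typically wired into a Gradio interface. process_image is a placeholder for the app's actual inference function, which this diff does not show, and the components use the current Gradio API; the Space itself may use the older gr.inputs/gr.outputs style.

```python
# Sketch only: process_image stands in for the app's real LayoutLMv3
# inference function, which is outside this diff.
import gradio as gr

def process_image(image):
    # Placeholder: the real app runs LayoutLMv3 token classification
    # and returns an annotated image.
    return image

css = """.output_image, .input_image {height: 600px !important}"""
examples = [["example1.png"], ["example1.png"], ["example1.png"]]

iface = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="pil"),   # current Gradio API; older Spaces used gr.inputs.Image
    outputs=gr.Image(type="pil"),
    examples=examples,
    css=css,
)
iface.launch()
```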