pierreguillou committed
Commit 4d49335 · 0 Parent(s)

Duplicate from pierreguillou/Inference-APP-Document-Understanding-at-linelevel-LiLT-base-LayoutXLM-base-v1
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,16 @@
+ ---
+ title: >-
+   Document Understanding Inference APP (v1 - line level - LiLT base vs LayoutXLM
+   base)
+ emoji: 🐢
+ colorFrom: blue
+ colorTo: yellow
+ sdk: gradio
+ sdk_version: 3.18.0
+ app_file: app.py
+ pinned: false
+ duplicated_from: >-
+   pierreguillou/Inference-APP-Document-Understanding-at-linelevel-LiLT-base-LayoutXLM-base-v1
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,249 @@
+ import os
+
+ # workaround: install old version of pytorch since detectron2 hasn't released packages for pytorch 1.9 (issue: https://github.com/facebookresearch/detectron2/issues/3158)
+ # os.system('pip install torch==1.8.0+cu101 torchvision==0.9.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html')
+ os.system('pip install -q torch==1.10.0+cu111 torchvision==0.11+cu111 -f https://download.pytorch.org/whl/torch_stable.html')
+
+ # install detectron2 from source so that it matches the installed PyTorch version
+ # See https://detectron2.readthedocs.io/tutorials/install.html for instructions
+ #os.system('pip install -q detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/torch1.8/index.html')
+ os.system('pip install git+https://github.com/facebookresearch/detectron2.git')
+
+ import detectron2
+ from detectron2.utils.logger import setup_logger
+ setup_logger()
+
+ import gradio as gr
+ import re
+ import string
+
+ from operator import itemgetter
+ import collections
+
+ import pypdf
+ from pypdf import PdfReader
+ from pypdf.errors import PdfReadError
+
+ import pdf2image
+ from pdf2image import convert_from_path
+ import langdetect
+ from langdetect import detect_langs
+
+ import pandas as pd
+ import numpy as np
+ import random
+ import tempfile
+ import itertools
+
+ from matplotlib import font_manager
+ from PIL import Image, ImageDraw, ImageFont
+ import cv2
+
+ ## files
+
+ import sys
+ sys.path.insert(0, 'files/')
+
+ import functions
+ from functions import *
+
+ # update pip
+ os.system('python -m pip install --upgrade pip')
+
+ ## model / feature extractor / tokenizer
+
+ # models
+ model_id_lilt = "pierreguillou/lilt-xlm-roberta-base-finetuned-with-DocLayNet-base-at-linelevel-ml384"
+ model_id_layoutxlm = "pierreguillou/layout-xlm-base-finetuned-with-DocLayNet-base-at-linelevel-ml384"
+
+ # get device
+ import torch
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ ## model LiLT
+ import transformers
+ from transformers import AutoTokenizer, AutoModelForTokenClassification
+ tokenizer_lilt = AutoTokenizer.from_pretrained(model_id_lilt)
+ model_lilt = AutoModelForTokenClassification.from_pretrained(model_id_lilt)
+ model_lilt.to(device)
+
+ ## model LayoutXLM
+ from transformers import LayoutLMv2ForTokenClassification # LayoutXLMTokenizerFast,
+ model_layoutxlm = LayoutLMv2ForTokenClassification.from_pretrained(model_id_layoutxlm)
+ model_layoutxlm.to(device)
+
+ # feature extractor
+ from transformers import LayoutLMv2FeatureExtractor
+ feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
+
+ # tokenizer (tokenizer_id_layoutxlm = "xlm-roberta-base" is defined in files/functions.py, imported above)
+ from transformers import AutoTokenizer
+ tokenizer_layoutxlm = AutoTokenizer.from_pretrained(tokenizer_id_layoutxlm)
+
+ # get labels
+ id2label_lilt = model_lilt.config.id2label
+ label2id_lilt = model_lilt.config.label2id
+ num_labels_lilt = len(id2label_lilt)
+
+ id2label_layoutxlm = model_layoutxlm.config.id2label
+ label2id_layoutxlm = model_layoutxlm.config.label2id
+ num_labels_layoutxlm = len(id2label_layoutxlm)
+
+ # APP outputs by model
+ def app_outputs_by_model(uploaded_pdf, model_id, model, tokenizer, max_length, id2label, cls_box, sep_box):
+     filename, msg, images = pdf_to_images(uploaded_pdf)
+     num_images = len(images)
+
+     if not msg.startswith("Error with the PDF"):
+
+         # extraction of image data (text and bounding boxes)
+         dataset, lines, row_indexes, par_boxes, line_boxes = extraction_data_from_image(images)
+         # prepare our data in the format of the model
+         prepare_inference_features_partial = partial(prepare_inference_features, tokenizer=tokenizer, max_length=max_length, cls_box=cls_box, sep_box=sep_box)
+         encoded_dataset = dataset.map(prepare_inference_features_partial, batched=True, batch_size=64, remove_columns=dataset.column_names)
+         custom_encoded_dataset = CustomDataset(encoded_dataset, tokenizer)
+         # get predictions (token level)
+         outputs, images_ids_list, chunk_ids, input_ids, bboxes = predictions_token_level(images, custom_encoded_dataset, model_id, model)
+         # get predictions (line level)
+         probs_bbox, bboxes_list_dict, input_ids_dict_dict, probs_dict_dict, df = predictions_line_level(max_length, tokenizer, id2label, dataset, outputs, images_ids_list, chunk_ids, input_ids, bboxes, cls_box, sep_box)
+         # get labeled images with lines bounding boxes
+         images = get_labeled_images(id2label, dataset, images_ids_list, bboxes_list_dict, probs_dict_dict)
+
+         img_files = list()
+         # save the (labeled) page images of the PDF
+         for i in range(num_images):
+             if filename != "files/blank.png": img_file = f"img_{i}_" + filename.replace(".pdf", ".png")
+             else: img_file = filename.replace(".pdf", ".png")
+             img_file = img_file.replace("/", "_")
+             images[i].save(img_file)
+             img_files.append(img_file)
+
+         if num_images < max_imgboxes:
+             img_files += [image_blank]*(max_imgboxes - num_images)
+             images += [Image.open(image_blank)]*(max_imgboxes - num_images)
+             for count in range(max_imgboxes - num_images):
+                 df[num_images + count] = pd.DataFrame()
+         else:
+             img_files = img_files[:max_imgboxes]
+             images = images[:max_imgboxes]
+             df = dict(itertools.islice(df.items(), max_imgboxes))
+
+         # save
+         csv_files = list()
+         for i in range(max_imgboxes):
+             csv_file = f"csv_{i}_" + filename.replace(".pdf", ".csv")
+             csv_file = csv_file.replace("/", "_")
+             csv_files.append(gr.File.update(value=csv_file, visible=True))
+             df[i].to_csv(csv_file, encoding="utf-8", index=False)
+
+     else:
+         # fall back to blank outputs when the PDF could not be read or converted
+         img_files, images, csv_files = [image_blank]*max_imgboxes, [Image.open(image_blank)]*max_imgboxes, [""]*max_imgboxes
+         csv_file = "csv_wo_content.csv"
+         df = dict()
+         for i in range(max_imgboxes):
+             csv_files[i] = gr.File.update(value=csv_file, visible=True)
+             df[i] = pd.DataFrame()
+             df[i].to_csv(csv_file, encoding="utf-8", index=False)
+
+     return msg, img_files[0], images[0], csv_files[0], df[0]
+
+ def app_outputs(uploaded_pdf):
+     msg_lilt, img_files_lilt, images_lilt, csv_files_lilt, df_lilt = app_outputs_by_model(uploaded_pdf,
+         model_id=model_id_lilt, model=model_lilt, tokenizer=tokenizer_lilt,
+         max_length=max_length_lilt, id2label=id2label_lilt, cls_box=cls_box, sep_box=sep_box_lilt)
+
+     msg_layoutxlm, img_files_layoutxlm, images_layoutxlm, csv_files_layoutxlm, df_layoutxlm = app_outputs_by_model(uploaded_pdf,
+         model_id=model_id_layoutxlm, model=model_layoutxlm, tokenizer=tokenizer_layoutxlm,
+         max_length=max_length_layoutxlm, id2label=id2label_layoutxlm, cls_box=cls_box, sep_box=sep_box_layoutxlm)
+
+     return msg_lilt, msg_layoutxlm, img_files_lilt, img_files_layoutxlm, images_lilt, images_layoutxlm, csv_files_lilt, csv_files_layoutxlm, df_lilt, df_layoutxlm
+
+ # gradio APP
+ with gr.Blocks(title="Inference APP for Document Understanding at line level (v1 - LiLT base vs LayoutXLM base)", css=".gradio-container") as demo:
+     gr.HTML("""
+     <div style="font-family:'Times New Roman', 'Serif'; font-size:26pt; font-weight:bold; text-align:center;"><h1>Inference APP for Document Understanding at line level (v1 - LiLT base vs LayoutXLM base)</h1></div>
+     <div style="margin-top: 40px"><p>(03/08/2023) This Inference APP compares - only on the first PDF page - 2 Document Understanding models finetuned on the dataset <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/datasets/pierreguillou/DocLayNet-base" target="_blank">DocLayNet base</a> at line level (chunk size of 384 tokens): <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/pierreguillou/lilt-xlm-roberta-base-finetuned-with-DocLayNet-base-at-linelevel-ml384" target="_blank">LiLT base combined with XLM-RoBERTa base</a> and <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/pierreguillou/layout-xlm-base-finetuned-with-DocLayNet-base-at-linelevel-ml384" target="_blank">LayoutXLM base combined with XLM-RoBERTa base</a>.</p></div>
+     <div><p>To test these 2 models separately, use their corresponding APP on Hugging Face Spaces: <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/spaces/pierreguillou/Inference-APP-Document-Understanding-at-linelevel-v1" target="_blank">LiLT base APP (v1 - line level)</a> and <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/spaces/pierreguillou/Inference-APP-Document-Understanding-at-linelevel-v2" target="_blank">LayoutXLM base APP (v2 - line level)</a>.</p></div><div style="margin-top: 20px"><p>Links to Document Understanding APPs:</p><ul><li>Line level: <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/spaces/pierreguillou/Inference-APP-Document-Understanding-at-linelevel-v1" target="_blank">v1 (LiLT base)</a> | <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/spaces/pierreguillou/Inference-APP-Document-Understanding-at-linelevel-v2" target="_blank">v2 (LayoutXLM base)</a> | <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/spaces/pierreguillou/Inference-APP-Document-Understanding-at-linelevel-LiLT-base-LayoutXLM-base-v1" target="_blank">v1 (LiLT base vs LayoutXLM base)</a></li><li>Paragraph level: <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/spaces/pierreguillou/Inference-APP-Document-Understanding-at-paragraphlevel-v1" target="_blank">v1 (LiLT base)</a></li></ul></div><div style="margin-top: 20px"><p>More information about the DocLayNet datasets, the finetuning of the model and this APP in the following blog posts:</p><ul><li>(03/05/2023) <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="" target="_blank">Document AI | Inference APP and fine-tuning notebook for Document Understanding at line level with LayoutXLM base</a></li><li>(02/14/2023) <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://medium.com/@pierre_guillou/document-ai-inference-app-for-document-understanding-at-line-level-a35bbfa98893" target="_blank">Document AI | Inference APP for Document Understanding at line level</a></li><li>(02/10/2023) <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://medium.com/@pierre_guillou/document-ai-document-understanding-model-at-line-level-with-lilt-tesseract-and-doclaynet-dataset-347107a643b8" target="_blank">Document AI | Document Understanding model at line level with LiLT, Tesseract and DocLayNet dataset</a></li><li>(01/31/2023) <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://medium.com/@pierre_guillou/document-ai-doclaynet-image-viewer-app-3ac54c19956" target="_blank">Document AI | DocLayNet image viewer APP</a></li><li>(01/27/2023) <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://medium.com/@pierre_guillou/document-ai-processing-of-doclaynet-dataset-to-be-used-by-layout-models-of-the-hugging-face-hub-308d8bd81cdb" target="_blank">Document AI | Processing of DocLayNet dataset to be used by layout models of the Hugging Face hub (finetuning, inference)</a></li></ul></div>
+     """)
+     with gr.Row():
+         pdf_file = gr.File(label="PDF")
+     with gr.Row():
+         submit_btn = gr.Button(f"Get layout detection by LiLT and LayoutXLM on the first PDF page")
+         reset_btn = gr.Button(value="Clear")
+     with gr.Row():
+         output_messages = []
+         with gr.Column():
+             output_msg = gr.Textbox(label="LiLT output message")
+             output_messages.append(output_msg)
+         with gr.Column():
+             output_msg = gr.Textbox(label="LayoutXLM output message")
+             output_messages.append(output_msg)
+     with gr.Row():
+         fileboxes = []
+         with gr.Column():
+             file_path = gr.File(visible=True, label=f"LiLT image file")
+             fileboxes.append(file_path)
+         with gr.Column():
+             file_path = gr.File(visible=True, label=f"LayoutXLM image file")
+             fileboxes.append(file_path)
+     with gr.Row():
+         imgboxes = []
+         with gr.Column():
+             img = gr.Image(type="pil", label=f"LiLT image")
+             imgboxes.append(img)
+         with gr.Column():
+             img = gr.Image(type="pil", label=f"LayoutXLM image")
+             imgboxes.append(img)
+     with gr.Row():
+         csvboxes = []
+         with gr.Column():
+             csv = gr.File(visible=True, label=f"LiLT csv file at line level")
+             csvboxes.append(csv)
+         with gr.Column():
+             csv = gr.File(visible=True, label=f"LayoutXLM csv file at line level")
+             csvboxes.append(csv)
+     with gr.Row():
+         dfboxes = []
+         with gr.Column():
+             df = gr.Dataframe(
+                 headers=["bounding boxes", "texts", "labels"],
+                 datatype=["str", "str", "str"],
+                 col_count=(3, "fixed"),
+                 visible=True,
+                 label=f"LiLT data",
+                 type="pandas",
+                 wrap=True
+             )
+             dfboxes.append(df)
+         with gr.Column():
+             df = gr.Dataframe(
+                 headers=["bounding boxes", "texts", "labels"],
+                 datatype=["str", "str", "str"],
+                 col_count=(3, "fixed"),
+                 visible=True,
+                 label=f"LayoutXLM data",
+                 type="pandas",
+                 wrap=True
+             )
+             dfboxes.append(df)
+
+     outputboxes = output_messages + fileboxes + imgboxes + csvboxes + dfboxes
+
+     submit_btn.click(app_outputs, inputs=[pdf_file], outputs=outputboxes)
+
+     # https://github.com/gradio-app/gradio/pull/2044/files#diff-a91dd2749f68bb7d0099a0f4079a4fd2d10281e299e7b451cb1bb876a7c21975R91
+     reset_btn.click(
+         lambda: [pdf_file.update(value=None)] + [output_msg.update(value=None) for output_msg in output_messages] + [filebox.update(value=None) for filebox in fileboxes] + [imgbox.update(value=None) for imgbox in imgboxes] + [csvbox.update(value=None) for csvbox in csvboxes] + [dfbox.update(value=None) for dfbox in dfboxes],
+         inputs=[],
+         outputs=[pdf_file] + output_messages + fileboxes + imgboxes + csvboxes + dfboxes
+     )
+
+     gr.Examples(
+         [["files/example.pdf"]],
+         [pdf_file],
+         outputboxes,
+         fn=app_outputs,
+         cache_examples=True,
+     )
+
+ demo.launch()
files/README.md ADDED
File without changes
files/blank.pdf ADDED
Binary file (1.15 kB)
 
files/blank.png ADDED
files/example.pdf ADDED
Binary file (343 kB)
 
files/functions.py ADDED
@@ -0,0 +1,896 @@
+ import os
+
+ # workaround: install old version of pytorch since detectron2 hasn't released packages for pytorch 1.9 (issue: https://github.com/facebookresearch/detectron2/issues/3158)
+ # os.system('pip install torch==1.8.0+cu101 torchvision==0.9.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html')
+ os.system('pip install -q torch==1.10.0+cu111 torchvision==0.11+cu111 -f https://download.pytorch.org/whl/torch_stable.html')
+
+ # install detectron2 from source so that it matches the installed PyTorch version
+ # See https://detectron2.readthedocs.io/tutorials/install.html for instructions
+ #os.system('pip install -q detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/torch1.8/index.html')
+ os.system('pip install git+https://github.com/facebookresearch/detectron2.git')
+
+ import detectron2
+ from detectron2.utils.logger import setup_logger
+ setup_logger()
+
+ import gradio as gr
+ import re
+ import string
+ import torch
+
+ from operator import itemgetter
+ import collections
+
+ import pypdf
+ from pypdf import PdfReader
+ from pypdf.errors import PdfReadError
+
+ import pdf2image
+ from pdf2image import convert_from_path
+ import langdetect
+ from langdetect import detect_langs
+
+ import pandas as pd
+ import numpy as np
+ import random
+ import tempfile
+ import itertools
+
+ from matplotlib import font_manager
+ from PIL import Image, ImageDraw, ImageFont
+ import cv2
+
+ import pathlib
+ from pathlib import Path
+ import shutil
+
+ from functools import partial
+
+ # Tesseract
+ print(os.popen('cat /etc/debian_version').read())
+ print(os.popen('cat /etc/issue').read())
+ print(os.popen('apt search tesseract').read())
+ import pytesseract
+
+ ## Key parameters
+
+ # categories colors
+ label2color = {
+     'Caption': 'brown',
+     'Footnote': 'orange',
+     'Formula': 'gray',
+     'List-item': 'yellow',
+     'Page-footer': 'red',
+     'Page-header': 'red',
+     'Picture': 'violet',
+     'Section-header': 'orange',
+     'Table': 'green',
+     'Text': 'blue',
+     'Title': 'pink'
+ }
+
+ # bounding boxes of the start and end tokens of a sequence
+ cls_box = [0, 0, 0, 0]
+ sep_box_lilt = cls_box
+ sep_box_layoutxlm = [1000, 1000, 1000, 1000]
+
+ # models
+ model_id_lilt = "pierreguillou/lilt-xlm-roberta-base-finetuned-with-DocLayNet-base-at-linelevel-ml384"
+ model_id_layoutxlm = "pierreguillou/layout-xlm-base-finetuned-with-DocLayNet-base-at-linelevel-ml384"
+
+ # tokenizer for LayoutXLM
+ tokenizer_id_layoutxlm = "xlm-roberta-base"
+
+ # (tokenization) the maximum length of a feature (sequence)
+ if str(384) in model_id_lilt:
+     max_length_lilt = 384
+ elif str(512) in model_id_lilt:
+     max_length_lilt = 512
+ else:
+     print("Error with max_length_lilt of chunks!")
+
+ if str(384) in model_id_layoutxlm:
+     max_length_layoutxlm = 384
+ elif str(512) in model_id_layoutxlm:
+     max_length_layoutxlm = 512
+ else:
+     print("Error with max_length_layoutxlm of chunks!")
+
+ # (tokenization) overlap
+ doc_stride = 128 # the authorized overlap between two parts of the context when splitting is needed
+
+ # max PDF page images that will be displayed
+ max_imgboxes = 1
+
+ # get files
+ examples_dir = 'files/'
+ Path(examples_dir).mkdir(parents=True, exist_ok=True)
+ from huggingface_hub import hf_hub_download
+ files = ["example.pdf", "blank.pdf", "blank.png", "languages_iso.csv", "languages_tesseract.csv", "wo_content.png"]
+ for file_name in files:
+     path_to_file = hf_hub_download(
+         repo_id = "pierreguillou/Inference-APP-Document-Understanding-at-linelevel-LiLT-base-LayoutXLM-base-v1",
+         filename = "files/" + file_name,
+         repo_type = "space"
+     )
+     shutil.copy(path_to_file, examples_dir)
+
+ # path to files
+ image_wo_content = examples_dir + "wo_content.png" # image without content
+ pdf_blank = examples_dir + "blank.pdf" # blank PDF
+ image_blank = examples_dir + "blank.png" # blank image
+
+ ## get langdetect2Tesseract dictionary
+ t = "files/languages_tesseract.csv"
+ l = "files/languages_iso.csv"
+
+ df_t = pd.read_csv(t)
+ df_l = pd.read_csv(l)
+
+ langs_t = df_t["Language"].to_list()
+ langs_t = [lang_t.lower().strip().translate(str.maketrans('', '', string.punctuation)) for lang_t in langs_t]
+ langs_l = df_l["Language"].to_list()
+ langs_l = [lang_l.lower().strip().translate(str.maketrans('', '', string.punctuation)) for lang_l in langs_l]
+ langscode_t = df_t["LangCode"].to_list()
+ langscode_l = df_l["LangCode"].to_list()
+
+ Tesseract2langdetect, langdetect2Tesseract = dict(), dict()
+ for lang_t, langcode_t in zip(langs_t, langscode_t):
+     try:
+         if lang_t == "Chinese - Simplified".lower().strip().translate(str.maketrans('', '', string.punctuation)): lang_t = "chinese"
+         index = langs_l.index(lang_t)
+         langcode_l = langscode_l[index]
+         Tesseract2langdetect[langcode_t] = langcode_l
+     except:
+         continue
+
+ langdetect2Tesseract = {v: k for k, v in Tesseract2langdetect.items()}
+
+ ## model / feature extractor / tokenizer
+
+ # get device
+ import torch
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ ## model LiLT
+ import transformers
+ from transformers import AutoTokenizer, AutoModelForTokenClassification
+ tokenizer_lilt = AutoTokenizer.from_pretrained(model_id_lilt)
+ model_lilt = AutoModelForTokenClassification.from_pretrained(model_id_lilt)
+ model_lilt.to(device)
+
+ ## model LayoutXLM
+ from transformers import LayoutLMv2ForTokenClassification # LayoutXLMTokenizerFast,
+ model_layoutxlm = LayoutLMv2ForTokenClassification.from_pretrained(model_id_layoutxlm)
+ model_layoutxlm.to(device)
+
+ # feature extractor
+ from transformers import LayoutLMv2FeatureExtractor
+ feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
+
+ # tokenizer
+ from transformers import AutoTokenizer
+ tokenizer_layoutxlm = AutoTokenizer.from_pretrained(tokenizer_id_layoutxlm)
+
+ # get labels
+ id2label_lilt = model_lilt.config.id2label
+ label2id_lilt = model_lilt.config.label2id
+ num_labels_lilt = len(id2label_lilt)
+
+ id2label_layoutxlm = model_layoutxlm.config.id2label
+ label2id_layoutxlm = model_layoutxlm.config.label2id
+ num_labels_layoutxlm = len(id2label_layoutxlm)
+
+ ## General
+
+ # get text and bounding boxes from an image
+ # https://stackoverflow.com/questions/61347755/how-can-i-get-line-coordinates-that-readed-by-tesseract
+ # https://medium.com/geekculture/tesseract-ocr-understanding-the-contents-of-documents-beyond-their-text-a98704b7c655
+ def get_data(results, factor, conf_min=0):
+
+     data = {}
+     for i in range(len(results['line_num'])):
+         level = results['level'][i]
+         block_num = results['block_num'][i]
+         par_num = results['par_num'][i]
+         line_num = results['line_num'][i]
+         top, left = results['top'][i], results['left'][i]
+         width, height = results['width'][i], results['height'][i]
+         conf = results['conf'][i]
+         text = results['text'][i]
+         if not (text == '' or text.isspace()):
+             if conf >= conf_min:
+                 tup = (text, left, top, width, height)
+                 if block_num in list(data.keys()):
+                     if par_num in list(data[block_num].keys()):
+                         if line_num in list(data[block_num][par_num].keys()):
+                             data[block_num][par_num][line_num].append(tup)
+                         else:
+                             data[block_num][par_num][line_num] = [tup]
+                     else:
+                         data[block_num][par_num] = {}
+                         data[block_num][par_num][line_num] = [tup]
+                 else:
+                     data[block_num] = {}
+                     data[block_num][par_num] = {}
+                     data[block_num][par_num][line_num] = [tup]
+
+     # get paragraphs dictionary with list of lines
+     par_data = {}
+     par_idx = 1
+     for _, b in data.items():
+         for _, p in b.items():
+             line_data = {}
+             line_idx = 1
+             for _, l in p.items():
+                 line_data[line_idx] = l
+                 line_idx += 1
+             par_data[par_idx] = line_data
+             par_idx += 1
+
+     # get lines of texts, grouped by paragraph
+     lines = list()
+     row_indexes = list()
+     row_index = 0
+     for _, par in par_data.items():
+         count_lines = 0
+         for _, line in par.items():
+             if count_lines == 0: row_indexes.append(row_index)
+             line_text = ' '.join([item[0] for item in line])
+             lines.append(line_text)
+             count_lines += 1
+             row_index += 1
+         # lines.append("\n")
+         row_index += 1
+     # lines = lines[:-1]
+
+     # get paragraph boxes (par_boxes)
+     # get line boxes (line_boxes)
+     par_boxes = list()
+     par_idx = 1
+     line_boxes = list()
+     line_idx = 1
+     for _, par in par_data.items():
+         xmins, ymins, xmaxs, ymaxs = list(), list(), list(), list()
+         for _, line in par.items():
+             xmin, ymin = line[0][1], line[0][2]
+             xmax, ymax = (line[-1][1] + line[-1][3]), (line[-1][2] + line[-1][4])
+             line_boxes.append([int(xmin/factor), int(ymin/factor), int(xmax/factor), int(ymax/factor)])
+             xmins.append(xmin)
+             ymins.append(ymin)
+             xmaxs.append(xmax)
+             ymaxs.append(ymax)
+             line_idx += 1
+         xmin, ymin, xmax, ymax = min(xmins), min(ymins), max(xmaxs), max(ymaxs)
+         par_boxes.append([int(xmin/factor), int(ymin/factor), int(xmax/factor), int(ymax/factor)])
+         par_idx += 1
+
+     return lines, row_indexes, par_boxes, line_boxes #data, par_data #
+
+ # rescale image to get 300dpi
+ def set_image_dpi_resize(image):
+     """
+     Rescale an image to 300dpi while resizing
+     :param image: An image
+     :return: the rescaling factor and the path to the rescaled image
+     """
+     length_x, width_y = image.size
+     factor = min(1, float(1024.0 / length_x))
+     size = int(factor * length_x), int(factor * width_y)
+     # image_resize = image.resize(size, Image.Resampling.LANCZOS)
+     image_resize = image.resize(size, Image.LANCZOS)
+     temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='1.png')
+     temp_filename = temp_file.name
+     image_resize.save(temp_filename, dpi=(300, 300))
+     return factor, temp_filename
+
+ # each bounding box must be in (upper left, lower right) format
+ # source: https://github.com/NielsRogge/Transformers-Tutorials/issues/129
+ def upperleft_to_lowerright(bbox):
+     x0, y0, x1, y1 = tuple(bbox)
+     if bbox[2] < bbox[0]:
+         x0 = bbox[2]
+         x1 = bbox[0]
+     if bbox[3] < bbox[1]:
+         y0 = bbox[3]
+         y1 = bbox[1]
+     return [x0, y0, x1, y1]
+
+ # convert bounding boxes from (left, top, width, height) format to (left, top, left+width, top+height) format
+ def convert_box(bbox):
+     x, y, w, h = tuple(bbox) # the row comes in (left, top, width, height) format
+     return [x, y, x+w, y+h] # we turn it into (left, top, left+width, top+height) to get the actual box
+
+ # the models expect bounding boxes normalized to a 1000x1000 space
+ def normalize_box(bbox, width, height):
+     return [
+         int(1000 * (bbox[0] / width)),
+         int(1000 * (bbox[1] / height)),
+         int(1000 * (bbox[2] / width)),
+         int(1000 * (bbox[3] / height)),
+     ]
+
+ # convert boxes back from the normalized 1000x1000 space to image pixels
+ def denormalize_box(bbox, width, height):
+     return [
+         int(width * (bbox[0] / 1000)),
+         int(height * (bbox[1] / 1000)),
+         int(width * (bbox[2] / 1000)),
+         int(height * (bbox[3] / 1000)),
+     ]
+
+ # get back original size
+ def original_box(box, original_width, original_height, coco_width, coco_height):
+     return [
+         int(original_width * (box[0] / coco_width)),
+         int(original_height * (box[1] / coco_height)),
+         int(original_width * (box[2] / coco_width)),
+         int(original_height * (box[3] / coco_height)),
+     ]
+
+ def get_blocks(bboxes_block, categories, texts):
+
+     # get list of unique block boxes
+     bbox_block_dict, bboxes_block_list, bbox_block_prec = dict(), list(), list()
+     for count_block, bbox_block in enumerate(bboxes_block):
+         if bbox_block != bbox_block_prec:
+             bbox_block_indexes = [i for i, bbox in enumerate(bboxes_block) if bbox == bbox_block]
+             bbox_block_dict[count_block] = bbox_block_indexes
+             bboxes_block_list.append(bbox_block)
+             bbox_block_prec = bbox_block
+
+     # get list of categories and texts by unique block boxes
+     category_block_list, text_block_list = list(), list()
+     for bbox_block in bboxes_block_list:
+         count_block = bboxes_block.index(bbox_block)
+         bbox_block_indexes = bbox_block_dict[count_block]
+         category_block = np.array(categories, dtype=object)[bbox_block_indexes].tolist()[0]
+         category_block_list.append(category_block)
+         text_block = np.array(texts, dtype=object)[bbox_block_indexes].tolist()
+         text_block = [text.replace("\n", "").strip() for text in text_block]
+         if id2label[category_block] == "Text" or id2label[category_block] == "Caption" or id2label[category_block] == "Footnote":
+             text_block = ' '.join(text_block)
+         else:
+             text_block = '\n'.join(text_block)
+         text_block_list.append(text_block)
+
+     return bboxes_block_list, category_block_list, text_block_list
+
+ # function to sort bounding boxes
+ def get_sorted_boxes(bboxes):
+
+     # sort by y from page top to bottom
+     sorted_bboxes = sorted(bboxes, key=itemgetter(1), reverse=False)
+     y_list = [bbox[1] for bbox in sorted_bboxes]
+
+     # sort by x from page left to right when boxes have the same y
+     if len(list(set(y_list))) != len(y_list):
+         y_list_duplicates_indexes = dict()
+         y_list_duplicates = [item for item, count in collections.Counter(y_list).items() if count > 1]
+         for item in y_list_duplicates:
+             y_list_duplicates_indexes[item] = [i for i, e in enumerate(y_list) if e == item]
+             bbox_list_y_duplicates = sorted(np.array(sorted_bboxes, dtype=object)[y_list_duplicates_indexes[item]].tolist(), key=itemgetter(0), reverse=False)
+             np_array_bboxes = np.array(sorted_bboxes)
+             np_array_bboxes[y_list_duplicates_indexes[item]] = np.array(bbox_list_y_duplicates)
+             sorted_bboxes = np_array_bboxes.tolist()
+
+     return sorted_bboxes
+
+ # sort data from y = 0 to end of page (and then from x = 0 to end of page when necessary)
+ def sort_data(bboxes, categories, texts):
+
+     sorted_bboxes = get_sorted_boxes(bboxes)
+     sorted_bboxes_indexes = [bboxes.index(bbox) for bbox in sorted_bboxes]
+     sorted_categories = np.array(categories, dtype=object)[sorted_bboxes_indexes].tolist()
+     sorted_texts = np.array(texts, dtype=object)[sorted_bboxes_indexes].tolist()
+
+     return sorted_bboxes, sorted_categories, sorted_texts
+
+ # sort data from y = 0 to end of page (and then from x = 0 to end of page when necessary)
+ def sort_data_wo_labels(bboxes, texts):
+
+     sorted_bboxes = get_sorted_boxes(bboxes)
+     sorted_bboxes_indexes = [bboxes.index(bbox) for bbox in sorted_bboxes]
+     sorted_texts = np.array(texts, dtype=object)[sorted_bboxes_indexes].tolist()
+
+     return sorted_bboxes, sorted_texts
+
+ ## PDF processing
+
+ # get filename and images of PDF pages
+ def pdf_to_images(uploaded_pdf):
+
+     # check for a None object
+     if uploaded_pdf is None:
+         path_to_file = pdf_blank
+         filename = path_to_file.replace(examples_dir, "")
+         msg = "Invalid PDF file."
+         images = [Image.open(image_blank)]
+     else:
+         # path to the uploaded PDF
+         path_to_file = uploaded_pdf.name
+         filename = path_to_file.replace("/tmp/", "")
+
+         try:
+             PdfReader(path_to_file)
+         except PdfReadError:
+             path_to_file = pdf_blank
+             filename = path_to_file.replace(examples_dir, "")
+             msg = "Invalid PDF file."
+             images = [Image.open(image_blank)]
+         else:
+             try:
+                 images = convert_from_path(path_to_file, last_page=max_imgboxes)
+                 num_imgs = len(images)
+                 msg = f'The PDF "{filename}" was converted into {num_imgs} images.'
+             except:
+                 msg = f'Error with the PDF "{filename}": it was not converted into images.'
+                 images = [Image.open(image_wo_content)]
+
+     return filename, msg, images
+
+ # extraction of image data (text and bounding boxes)
+ def extraction_data_from_image(images):
+
+     num_imgs = len(images)
+
+     if num_imgs > 0:
+
+         # https://pyimagesearch.com/2021/11/15/tesseract-page-segmentation-modes-psms-explained-how-to-improve-your-ocr-accuracy/
+         custom_config = r'--oem 3 --psm 3 -l eng' # default config PyTesseract: --oem 3 --psm 3 -l eng+deu+fra+jpn+por+spa+rus+hin+chi_sim
+         results, lines, row_indexes, par_boxes, line_boxes, images_pixels = dict(), dict(), dict(), dict(), dict(), dict()
+         images_ids_list, lines_list, par_boxes_list, line_boxes_list, images_list, images_pixels_list, page_no_list, num_pages_list = list(), list(), list(), list(), list(), list(), list(), list()
+
+         try:
+             for i, image in enumerate(images):
+                 # image preprocessing
+                 # https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.html
+                 img = image.copy()
+                 factor, path_to_img = set_image_dpi_resize(img) # rescale to 300dpi
+                 img = Image.open(path_to_img)
+                 img = np.array(img, dtype='uint8') # convert PIL to cv2
+                 img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # gray scale image
+                 ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
+
+                 # OCR PyTesseract | get langs of page
+                 txt = pytesseract.image_to_string(img, config=custom_config)
+                 txt = txt.strip().lower()
+                 txt = re.sub(r" +", " ", txt) # multiple spaces
+                 txt = re.sub(r"(\n\s*)+\n+", "\n", txt) # multiple lines
+                 # txt = os.popen(f'tesseract {img_filepath} - {custom_config}').read()
+                 try:
+                     langs = detect_langs(txt)
+                     langs = [langdetect2Tesseract[langs[i].lang] for i in range(len(langs))]
+                     langs_string = '+'.join(langs)
+                 except:
+                     langs_string = "eng"
+                 langs_string += '+osd'
+                 custom_config = f'--oem 3 --psm 3 -l {langs_string}' # default config PyTesseract: --oem 3 --psm 3
+
+                 # OCR PyTesseract | get data
+                 results[i] = pytesseract.image_to_data(img, config=custom_config, output_type=pytesseract.Output.DICT)
+                 # results[i] = os.popen(f'tesseract {img_filepath} - {custom_config}').read()
+
+                 # get image pixels
+                 images_pixels[i] = feature_extractor(images[i], return_tensors="pt").pixel_values
+
+                 lines[i], row_indexes[i], par_boxes[i], line_boxes[i] = get_data(results[i], factor, conf_min=0)
+                 lines_list.append(lines[i])
+                 par_boxes_list.append(par_boxes[i])
+                 line_boxes_list.append(line_boxes[i])
+                 images_ids_list.append(i)
+                 images_pixels_list.append(images_pixels[i])
+                 images_list.append(images[i])
+                 page_no_list.append(i)
+                 num_pages_list.append(num_imgs)
+
+         except:
+             print("There was an error within the extraction of PDF text by the OCR!")
+         else:
+             from datasets import Dataset
+             dataset = Dataset.from_dict({"images_ids": images_ids_list, "images": images_list, "images_pixels": images_pixels_list, "page_no": page_no_list, "num_pages": num_pages_list, "texts": lines_list, "bboxes_line": line_boxes_list})
+
+             # print("The text data was successfully extracted by the OCR!")
+
+     return dataset, lines, row_indexes, par_boxes, line_boxes
+
+ ## Inference
+
+ def prepare_inference_features(example, tokenizer, max_length, cls_box, sep_box):
+
+     images_ids_list, chunks_ids_list, input_ids_list, attention_mask_list, bb_list, images_pixels_list = list(), list(), list(), list(), list(), list()
+
+     # get batch
+     batch_images_ids = example["images_ids"]
+     batch_images = example["images"]
+     batch_images_pixels = example["images_pixels"]
+     batch_bboxes_line = example["bboxes_line"]
+     batch_texts = example["texts"]
+     batch_images_size = [image.size for image in batch_images]
+
+     batch_width, batch_height = [image_size[0] for image_size in batch_images_size], [image_size[1] for image_size in batch_images_size]
+
+     # add a dimension if not a batch but only one image
+     if not isinstance(batch_images_ids, list):
+         batch_images_ids = [batch_images_ids]
+         batch_images = [batch_images]
+         batch_images_pixels = [batch_images_pixels]
+         batch_bboxes_line = [batch_bboxes_line]
+         batch_texts = [batch_texts]
+         batch_width, batch_height = [batch_width], [batch_height]
+
+     # process all images of the batch
+     for num_batch, (image_id, image_pixels, boxes, texts, width, height) in enumerate(zip(batch_images_ids, batch_images_pixels, batch_bboxes_line, batch_texts, batch_width, batch_height)):
+         tokens_list = []
+         bboxes_list = []
+
+         # add a dimension if only one image
+         if not isinstance(texts, list):
+             texts, boxes = [texts], [boxes]
+
+         # normalize the line bounding boxes
+         normalize_bboxes_line = [normalize_box(upperleft_to_lowerright(box), width, height) for box in boxes]
+
+         # sort boxes with texts
+         # we want sorted lists from top to bottom of the image
+         boxes, texts = sort_data_wo_labels(normalize_bboxes_line, texts)
+
+         count = 0
+         for box, text in zip(boxes, texts):
+             tokens = tokenizer.tokenize(text)
+             num_tokens = len(tokens) # get number of tokens
+             tokens_list.extend(tokens)
+
+             bboxes_list.extend([box] * num_tokens) # number of boxes must be the same as the number of tokens
+
+         # use of return_overflowing_tokens=True / stride=doc_stride
+         # to get parts of image with overlap
+         # source: https://huggingface.co/course/chapter6/3b?fw=tf#handling-long-contexts
+         encodings = tokenizer(" ".join(texts),
+                               truncation=True,
+                               padding="max_length",
+                               max_length=max_length,
+                               stride=doc_stride,
+                               return_overflowing_tokens=True,
+                               return_offsets_mapping=True
+                               )
+
+         otsm = encodings.pop("overflow_to_sample_mapping")
+         offset_mapping = encodings.pop("offset_mapping")
+
+         # let's label those examples and get their boxes
+         sequence_length_prev = 0
+         for i, offsets in enumerate(offset_mapping):
+             # truncate tokens, boxes and labels based on length of chunk - 2 (special tokens <s> and </s>)
+             sequence_length = len(encodings.input_ids[i]) - 2
+             if i == 0: start = 0
+             else: start += sequence_length_prev - doc_stride
+             end = start + sequence_length
+             sequence_length_prev = sequence_length
+
+             # get tokens, boxes and labels of this image chunk
+             bb = [cls_box] + bboxes_list[start:end] + [sep_box]
+
+             # as the last chunk can have a length < max_length
+             # we must add [tokenizer.pad_token] (tokens), [sep_box] (boxes) and [-100] (labels)
+             if len(bb) < max_length:
+                 bb = bb + [sep_box] * (max_length - len(bb))
+
+             # append results
+             input_ids_list.append(encodings["input_ids"][i])
+             attention_mask_list.append(encodings["attention_mask"][i])
+             bb_list.append(bb)
+             images_ids_list.append(image_id)
+             chunks_ids_list.append(i)
+             images_pixels_list.append(image_pixels)
+
+     return {
+         "images_ids": images_ids_list,
+         "chunk_ids": chunks_ids_list,
+         "input_ids": input_ids_list,
+         "attention_mask": attention_mask_list,
+         "normalized_bboxes": bb_list,
+         "images_pixels": images_pixels_list
+     }
+
+ from torch.utils.data import Dataset
+
+ class CustomDataset(Dataset):
+     def __init__(self, dataset, tokenizer):
+         self.dataset = dataset
+         self.tokenizer = tokenizer
+
+     def __len__(self):
+         return len(self.dataset)
+
+     def __getitem__(self, idx):
+         # get item
+         example = self.dataset[idx]
+         encoding = dict()
+         encoding["images_ids"] = example["images_ids"]
+         encoding["chunk_ids"] = example["chunk_ids"]
+         encoding["input_ids"] = example["input_ids"]
+         encoding["attention_mask"] = example["attention_mask"]
+         encoding["bbox"] = example["normalized_bboxes"]
+         encoding["images_pixels"] = example["images_pixels"]
+
+         return encoding
+
+ import torch.nn.functional as F
+
+ # get predictions at token level
+ def predictions_token_level(images, custom_encoded_dataset, model_id, model):
+
+     num_imgs = len(images)
+     if num_imgs > 0:
+
+         chunk_ids, input_ids, bboxes, pixels_values, outputs, token_predictions = dict(), dict(), dict(), dict(), dict(), dict()
+         images_ids_list = list()
+
+         for i, encoding in enumerate(custom_encoded_dataset):
+
+             # get custom encoded data
+             image_id = encoding['images_ids']
+             chunk_id = encoding['chunk_ids']
+             input_id = torch.tensor(encoding['input_ids'])[None]
+             attention_mask = torch.tensor(encoding['attention_mask'])[None]
+             bbox = torch.tensor(encoding['bbox'])[None]
+             pixel_values = torch.tensor(encoding["images_pixels"])
+
+             # save data in dictionaries
+             if image_id not in images_ids_list: images_ids_list.append(image_id)
+
+             if image_id in chunk_ids: chunk_ids[image_id].append(chunk_id)
+             else: chunk_ids[image_id] = [chunk_id]
+
+             if image_id in input_ids: input_ids[image_id].append(input_id)
+             else: input_ids[image_id] = [input_id]
+
+             if image_id in bboxes: bboxes[image_id].append(bbox)
+             else: bboxes[image_id] = [bbox]
+
+             if image_id in pixels_values: pixels_values[image_id].append(pixel_values)
+             else: pixels_values[image_id] = [pixel_values]
+
+             # get prediction with forward pass
+             with torch.no_grad():
+
+                 if model_id == model_id_lilt:
+                     output = model(
+                         input_ids=input_id.to(device),
+                         attention_mask=attention_mask.to(device),
+                         bbox=bbox.to(device),
+                     )
+                 elif model_id == model_id_layoutxlm:
+                     output = model(
+                         input_ids=input_id.to(device),
+                         attention_mask=attention_mask.to(device),
+                         bbox=bbox.to(device),
+                         image=pixel_values.to(device)
+                     )
+
+             # save probabilities of predictions in dictionary
+             if image_id in outputs: outputs[image_id].append(F.softmax(output.logits.squeeze(), dim=-1))
+             else: outputs[image_id] = [F.softmax(output.logits.squeeze(), dim=-1)]
+
+         return outputs, images_ids_list, chunk_ids, input_ids, bboxes
+
+     else:
+         print("An error occurred while getting predictions!")
+
+ from functools import reduce
+
+ # get predictions (line level)
+ def predictions_line_level(max_length, tokenizer, id2label, dataset, outputs, images_ids_list, chunk_ids, input_ids, bboxes, cls_box, sep_box):
+
+     ten_probs_dict, ten_input_ids_dict, ten_bboxes_dict = dict(), dict(), dict()
+     bboxes_list_dict, input_ids_dict_dict, probs_dict_dict, df = dict(), dict(), dict(), dict()
+
+     if len(images_ids_list) > 0:
+
+         for i, image_id in enumerate(images_ids_list):
+
+             # get image information
+             images_list = dataset.filter(lambda example: example["images_ids"] == image_id)["images"]
+             image = images_list[0]
+             width, height = image.size
+
+             # get data
+             chunk_ids_list = chunk_ids[image_id]
+             outputs_list = outputs[image_id]
+             input_ids_list = input_ids[image_id]
+             bboxes_list = bboxes[image_id]
+
+             # create zeros tensors
+             ten_probs = torch.zeros((outputs_list[0].shape[0] - 2)*len(outputs_list), outputs_list[0].shape[1])
+             ten_input_ids = torch.ones(size=(1, (outputs_list[0].shape[0] - 2)*len(outputs_list)), dtype=int)
+             ten_bboxes = torch.zeros(size=(1, (outputs_list[0].shape[0] - 2)*len(outputs_list), 4), dtype=int)
+
+             if len(outputs_list) > 1:
+
+                 for num_output, (output, input_id, bbox) in enumerate(zip(outputs_list, input_ids_list, bboxes_list)):
+                     start = num_output*(max_length - 2) - max(0, num_output)*doc_stride
+                     end = start + (max_length - 2)
+
+                     if num_output == 0:
+                         ten_probs[start:end, :] += output[1:-1]
+                         ten_input_ids[:, start:end] = input_id[:, 1:-1]
+                         ten_bboxes[:, start:end, :] = bbox[:, 1:-1, :]
+                     else:
+                         # average the probabilities over the overlapping doc_stride tokens
+                         ten_probs[start:start + doc_stride, :] += output[1:1 + doc_stride]
+                         ten_probs[start:start + doc_stride, :] = ten_probs[start:start + doc_stride, :] * 0.5
+                         ten_probs[start + doc_stride:end, :] += output[1 + doc_stride:-1]
+
+                         ten_input_ids[:, start:start + doc_stride] = input_id[:, 1:1 + doc_stride]
+                         ten_input_ids[:, start + doc_stride:end] = input_id[:, 1 + doc_stride:-1]
+
+                         ten_bboxes[:, start:start + doc_stride, :] = bbox[:, 1:1 + doc_stride, :]
+                         ten_bboxes[:, start + doc_stride:end, :] = bbox[:, 1 + doc_stride:-1, :]
+
+             else:
+                 ten_probs += outputs_list[0][1:-1]
+                 ten_input_ids = input_ids_list[0][:, 1:-1]
+                 ten_bboxes = bboxes_list[0][:, 1:-1]
+
+             ten_probs_list, ten_input_ids_list, ten_bboxes_list = ten_probs.tolist(), ten_input_ids.tolist()[0], ten_bboxes.tolist()[0]
+             bboxes_list = list()
+             input_ids_dict, probs_dict = dict(), dict()
+             bbox_prev = [-100, -100, -100, -100]
+             for probs, input_id, bbox in zip(ten_probs_list, ten_input_ids_list, ten_bboxes_list):
+                 bbox = denormalize_box(bbox, width, height)
+                 if bbox != bbox_prev and bbox != cls_box and bbox != sep_box and bbox[0] != bbox[2] and bbox[1] != bbox[3]:
+                     bboxes_list.append(bbox)
+                     input_ids_dict[str(bbox)] = [input_id]
+                     probs_dict[str(bbox)] = [probs]
+                 elif bbox != cls_box and bbox != sep_box and bbox[0] != bbox[2] and bbox[1] != bbox[3]:
+                     input_ids_dict[str(bbox)].append(input_id)
+                     probs_dict[str(bbox)].append(probs)
+                 bbox_prev = bbox
+
+             probs_bbox = dict()
+             for i, bbox in enumerate(bboxes_list):
+                 probs = probs_dict[str(bbox)]
+                 probs = np.array(probs).T.tolist()
+
+                 # the label of a line is the one whose token probabilities have the largest product
+                 probs_label = list()
+                 for probs_list in probs:
+                     prob_label = reduce(lambda x, y: x*y, probs_list)
+                     probs_label.append(prob_label)
+                 max_value = max(probs_label)
+                 max_index = probs_label.index(max_value)
+                 probs_bbox[str(bbox)] = max_index
+
+             bboxes_list_dict[image_id] = bboxes_list
+             input_ids_dict_dict[image_id] = input_ids_dict
+             probs_dict_dict[image_id] = probs_bbox
+
+             df[image_id] = pd.DataFrame()
+             df[image_id]["bboxes"] = bboxes_list
+             df[image_id]["texts"] = [tokenizer.decode(input_ids_dict[str(bbox)]) for bbox in bboxes_list]
+             df[image_id]["labels"] = [id2label[probs_bbox[str(bbox)]] for bbox in bboxes_list]
+
+         return probs_bbox, bboxes_list_dict, input_ids_dict_dict, probs_dict_dict, df
+
+     else:
+         print("An error occurred while getting predictions!")
+
+ # get labeled images with lines bounding boxes
+ def get_labeled_images(id2label, dataset, images_ids_list, bboxes_list_dict, probs_dict_dict):
+
+     labeled_images = list()
+
+     for i, image_id in enumerate(images_ids_list):
+
+         # get image
+         images_list = dataset.filter(lambda example: example["images_ids"] == image_id)["images"]
+         image = images_list[0]
+         width, height = image.size
+
+         # get predicted boxes and labels
+         bboxes_list = bboxes_list_dict[image_id]
+         probs_bbox = probs_dict_dict[image_id]
+
+         draw = ImageDraw.Draw(image)
+         # https://stackoverflow.com/questions/66274858/choosing-a-pil-imagefont-by-font-name-rather-than-filename-and-cross-platform-f
+         font = font_manager.FontProperties(family='sans-serif', weight='bold')
+         font_file = font_manager.findfont(font)
+         font_size = 30
+         font = ImageFont.truetype(font_file, font_size)
+
+         for bbox in bboxes_list:
+             predicted_label = id2label[probs_bbox[str(bbox)]]
+             draw.rectangle(bbox, outline=label2color[predicted_label])
+             draw.text((bbox[0] + 10, bbox[1] - font_size), text=predicted_label, fill=label2color[predicted_label], font=font)
+
+         labeled_images.append(image)
+
+     return labeled_images
+
+ # get data of encoded chunk
+ def get_encoded_chunk_inference(tokenizer, dataset, encoded_dataset, index_chunk=None):
+
+     # get datasets
+     example = dataset
+     encoded_example = encoded_dataset
+
+     # pick a random chunk from the dataset if no index is given
+     if index_chunk is None: index_chunk = random.randint(0, len(encoded_example) - 1)
+     encoded_example = encoded_example[index_chunk]
+     encoded_image_ids = encoded_example["images_ids"]
+
+     # get the image
+     example = example.filter(lambda example: example["images_ids"] == encoded_image_ids)[0]
+     image = example["images"] # original image
+     width, height = image.size
+     page_no = example["page_no"]
+     num_pages = example["num_pages"]
+
+     # get boxes, texts, categories
+     bboxes, input_ids = encoded_example["normalized_bboxes"][1:-1], encoded_example["input_ids"][1:-1]
+     bboxes = [denormalize_box(bbox, width, height) for bbox in bboxes]
+     num_tokens = len(input_ids) + 2
+
+     # get unique bboxes and corresponding labels
+     bboxes_list, input_ids_list = list(), list()
+     input_ids_dict = dict()
+     bbox_prev = [-100, -100, -100, -100]
+     for i, (bbox, input_id) in enumerate(zip(bboxes, input_ids)):
+         if bbox != bbox_prev:
+             bboxes_list.append(bbox)
+             input_ids_dict[str(bbox)] = [input_id]
+         else:
+             input_ids_dict[str(bbox)].append(input_id)
+
+         # start_indexes_list.append(i)
+         bbox_prev = bbox
+
+     # do not keep "</s><pad><pad>..."
+     if input_ids_dict[str(bboxes_list[-1])][0] == (tokenizer.convert_tokens_to_ids('</s>')):
+         del input_ids_dict[str(bboxes_list[-1])]
+         bboxes_list = bboxes_list[:-1]
+
+     # get texts by line
+     input_ids_list = input_ids_dict.values()
+     texts_list = [tokenizer.decode(input_ids) for input_ids in input_ids_list]
+
+     # display DataFrame
+     df = pd.DataFrame({"texts": texts_list, "input_ids": input_ids_list, "bboxes": bboxes_list})
+
+     return image, df, num_tokens, page_no, num_pages
+
+ # display chunk of PDF image and its data (notebook debug helper, not called by the Gradio app)
+ def display_chunk_lines_inference(tokenizer, dataset, encoded_dataset, index_chunk=None):
+
+     # get image and image data
+     image, df, num_tokens, page_no, num_pages = get_encoded_chunk_inference(tokenizer, dataset, encoded_dataset, index_chunk=index_chunk)
+
+     # get data from dataframe
+     input_ids = df["input_ids"]
+     texts = df["texts"]
+     bboxes = df["bboxes"]
+
+     print(f'Chunk ({num_tokens} tokens) of the PDF (page: {page_no+1} / {num_pages})\n')
+
+     # display image with bounding boxes
+     print(">> PDF image with bounding boxes of lines\n")
+     draw = ImageDraw.Draw(image)
+
+     labels = list()
+     for box, text in zip(bboxes, texts):
+         color = "red"
+         draw.rectangle(box, outline=color)
+
+     # resize image to original
+     width, height = image.size
+     image = image.resize((int(0.5*width), int(0.5*height)))
+
+     # convert to cv and display (cv2_imshow and display come from the Colab / IPython environment)
+     img = np.array(image, dtype='uint8') # PIL to cv2
+     cv2_imshow(img)
+     cv2.waitKey(0)
+
+     # display image dataframe
+     print("\n>> Dataframe of annotated lines\n")
+     cols = ["texts", "bboxes"]
+     df = df[cols]
+     display(df)
files/languages_iso.csv ADDED
@@ -0,0 +1,184 @@
+ Language,LangCode
+ Abkhazian,ab
+ Afar,aa
+ Afrikaans,af
+ Akan,ak
+ Albanian,sq
+ Amharic,am
+ Arabic,ar
+ Aragonese,an
+ Armenian,hy
+ Assamese,as
+ Avaric,av
+ Avestan,ae
+ Aymara,ay
+ Azerbaijani,az
+ Bambara,bm
+ Bashkir,ba
+ Basque,eu
+ Belarusian,be
+ Bengali,bn
+ Bislama,bi
+ Bosnian,bs
+ Breton,br
+ Bulgarian,bg
+ Burmese,my
+ "Catalan, Valencian",ca
+ Chamorro,ch
+ Chechen,ce
+ "Chichewa, Chewa, Nyanja",ny
+ Chinese,zh
+ "Church Slavonic, Old Slavonic, Old Church Slavonic",cu
+ Chuvash,cv
+ Cornish,kw
+ Corsican,co
+ Cree,cr
+ Croatian,hr
+ Czech,cs
+ Danish,da
+ "Divehi, Dhivehi, Maldivian",dv
+ "Dutch, Flemish",nl
+ Dzongkha,dz
+ English,en
+ Esperanto,eo
+ Estonian,et
+ Ewe,ee
+ Faroese,fo
+ Fijian,fj
+ Finnish,fi
+ French,fr
+ Western Frisian,fy
+ Fulah,ff
+ "Gaelic, Scottish Gaelic",gd
+ Galician,gl
+ Ganda,lg
+ Georgian,ka
+ German,de
+ "Greek, Modern (1453–)",el
+ "Kalaallisut, Greenlandic",kl
+ Guarani,gn
+ Gujarati,gu
+ "Haitian, Haitian Creole",ht
+ Hausa,ha
+ Hebrew,he
+ Herero,hz
+ Hindi,hi
+ Hiri Motu,ho
+ Hungarian,hu
+ Icelandic,is
+ Ido,io
+ Igbo,ig
+ Indonesian,id
+ Interlingua (International Auxiliary Language Association),ia
+ "Interlingue, Occidental",ie
+ Inuktitut,iu
+ Inupiaq,ik
+ Irish,ga
+ Italian,it
+ Japanese,ja
+ Javanese,jv
+ Kannada,kn
+ Kanuri,kr
+ Kashmiri,ks
+ Kazakh,kk
+ Central Khmer,km
+ "Kikuyu, Gikuyu",ki
+ Kinyarwanda,rw
+ "Kirghiz, Kyrgyz",ky
+ Komi,kv
+ Kongo,kg
+ Korean,ko
+ "Kuanyama, Kwanyama",kj
+ Kurdish,ku
+ Lao,lo
+ Latin,la
+ Latvian,lv
+ "Limburgan, Limburger, Limburgish",li
+ Lingala,ln
+ Lithuanian,lt
+ Luba-Katanga,lu
+ "Luxembourgish, Letzeburgesch",lb
+ Macedonian,mk
+ Malagasy,mg
+ Malay,ms
+ Malayalam,ml
+ Maltese,mt
+ Manx,gv
+ Maori,mi
+ Marathi,mr
+ Marshallese,mh
+ Mongolian,mn
+ Nauru,na
+ "Navajo, Navaho",nv
+ North Ndebele,nd
+ South Ndebele,nr
+ Ndonga,ng
+ Nepali,ne
+ Norwegian,no
+ Norwegian Bokmål,nb
+ Norwegian Nynorsk,nn
+ "Sichuan Yi, Nuosu",ii
+ Occitan,oc
+ Ojibwa,oj
+ Oriya,or
+ Oromo,om
+ "Ossetian, Ossetic",os
+ Pali,pi
+ "Pashto, Pushto",ps
+ Persian,fa
+ Polish,pl
+ Portuguese,pt
+ "Punjabi, Panjabi",pa
+ Quechua,qu
+ "Romanian, Moldavian, Moldovan",ro
+ Romansh,rm
+ Rundi,rn
+ Russian,ru
+ Northern Sami,se
+ Samoan,sm
+ Sango,sg
+ Sanskrit,sa
+ Sardinian,sc
+ Serbian,sr
+ Shona,sn
+ Sindhi,sd
+ "Sinhala, Sinhalese",si
+ Slovak,sk
+ Slovenian,sl
+ Somali,so
+ Southern Sotho,st
+ "Spanish, Castilian",es
+ Sundanese,su
+ Swahili,sw
+ Swati,ss
+ Swedish,sv
+ Tagalog,tl
+ Tahitian,ty
+ Tajik,tg
+ Tamil,ta
+ Tatar,tt
+ Telugu,te
+ Thai,th
+ Tibetan,bo
+ Tigrinya,ti
+ Tonga (Tonga Islands),to
+ Tsonga,ts
+ Tswana,tn
+ Turkish,tr
+ Turkmen,tk
+ Twi,tw
+ "Uighur, Uyghur",ug
+ Ukrainian,uk
+ Urdu,ur
+ Uzbek,uz
+ Venda,ve
+ Vietnamese,vi
+ Volapük,vo
+ Walloon,wa
+ Welsh,cy
+ Wolof,wo
+ Xhosa,xh
+ Yiddish,yi
+ Yoruba,yo
+ "Zhuang, Chuang",za
+ Zulu,zu
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Language,LangCode
2
+ Afrikaans,afr
3
+ Amharic,amh
4
+ Arabic,ara
5
+ Assamese,asm
6
+ Azerbaijani,aze
7
+ Azerbaijani - Cyrilic,aze_cyrl
8
+ Belarusian,bel
9
+ Bengali,ben
10
+ Tibetan,bod
11
+ Bosnian,bos
12
+ Breton,bre
13
+ Bulgarian,bul
14
+ Catalan; Valencian,cat
15
+ Cebuano,ceb
16
+ Czech,ces
17
+ Chinese - Simplified,chi_sim
18
+ Chinese - Traditional,chi_tra
19
+ Cherokee,chr
20
+ Corsican,cos
21
+ Welsh,cym
22
+ Danish,dan
23
+ Danish - Fraktur (contrib),dan_frak
24
+ German,deu
25
+ German - Fraktur (contrib),deu_frak
26
+ Dzongkha,dzo
27
+ "Greek, Modern (1453-)",ell
28
+ English,eng
29
+ "English, Middle (1100-1500)",enm
30
+ Esperanto,epo
31
+ Math / equation detection module,equ
32
+ Estonian,est
33
+ Basque,eus
34
+ Faroese,fao
35
+ Persian,fas
36
+ Filipino (old - Tagalog),fil
37
+ Finnish,fin
38
+ French,fra
39
+ German - Fraktur,frk
40
+ "French, Middle (ca.1400-1600)",frm
41
+ Western Frisian,fry
42
+ Scottish Gaelic,gla
43
+ Irish,gle
44
+ Galician,glg
45
+ "Greek, Ancient (to 1453) (contrib)",grc
46
+ Gujarati,guj
47
+ Haitian; Haitian Creole,hat
48
+ Hebrew,heb
49
+ Hindi,hin
50
+ Croatian,hrv
51
+ Hungarian,hun
52
+ Armenian,hye
53
+ Inuktitut,iku
54
+ Indonesian,ind
55
+ Icelandic,isl
56
+ Italian,ita
57
+ Italian - Old,ita_old
58
+ Javanese,jav
59
+ Japanese,jpn
60
+ Kannada,kan
61
+ Georgian,kat
62
+ Georgian - Old,kat_old
63
+ Kazakh,kaz
64
+ Central Khmer,khm
65
+ Kirghiz; Kyrgyz,kir
66
+ Kurmanji (Kurdish - Latin Script),kmr
67
+ Korean,kor
68
+ Korean (vertical),kor_vert
69
+ Kurdish (Arabic Script),kur
70
+ Lao,lao
71
+ Latin,lat
72
+ Latvian,lav
73
+ Lithuanian,lit
74
+ Luxembourgish,ltz
75
+ Malayalam,mal
76
+ Marathi,mar
77
+ Macedonian,mkd
78
+ Maltese,mlt
79
+ Mongolian,mon
80
+ Maori,mri
81
+ Malay,msa
82
+ Burmese,mya
83
+ Nepali,nep
84
+ Dutch; Flemish,nld
85
+ Norwegian,nor
86
+ Occitan (post 1500),oci
87
+ Oriya,ori
88
+ Orientation and script detection module,osd
89
+ Panjabi; Punjabi,pan
90
+ Polish,pol
91
+ Portuguese,por
92
+ Pushto; Pashto,pus
93
+ Quechua,que
94
+ Romanian; Moldavian; Moldovan,ron
95
+ Russian,rus
96
+ Sanskrit,san
97
+ Sinhala; Sinhalese,sin
98
+ Slovak,slk
99
+ Slovak - Fraktur (contrib),slk_frak
100
+ Slovenian,slv
101
+ Sindhi,snd
102
+ Spanish; Castilian,spa
103
+ Spanish; Castilian - Old,spa_old
104
+ Albanian,sqi
105
+ Serbian,srp
106
+ Serbian - Latin,srp_latn
107
+ Sundanese,sun
108
+ Swahili,swa
109
+ Swedish,swe
110
+ Syriac,syr
111
+ Tamil,tam
112
+ Tatar,tat
113
+ Telugu,tel
114
+ Tajik,tgk
115
+ Tagalog (new - Filipino),tgl
116
+ Thai,tha
117
+ Tigrinya,tir
118
+ Tonga,ton
119
+ Turkish,tur
120
+ Uighur; Uyghur,uig
121
+ Ukrainian,ukr
122
+ Urdu,urd
123
+ Uzbek,uzb
124
+ Uzbek - Cyrilic,uzb_cyrl
125
+ Vietnamese,vie
126
+ Yiddish,yid
127
+ Yoruba,yor
files/template.pdf ADDED
Binary file (29.4 kB). View file
 
files/wo_content.png ADDED
packages.txt ADDED
@@ -0,0 +1,2 @@
+ tesseract-ocr-all
+ poppler-utils
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ torch
+ transformers
+ datasets
+ pytesseract
+ opencv-python
+ pdf2image
+ pypdf
+ langdetect
+ gradio