ayymen committed on
Commit
0117e7e
1 Parent(s): 61a6ac3

Add crnn_mobilenet_v3_large_tifinagh model

Browse files
Files changed (3) hide show
  1. README.md +69 -0
  2. config.json +21 -0
  3. pytorch_model.bin +3 -0
README.md ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---
3
+ language: en
4
+ ---
5
+
6
+ <p align="center">
7
+ <img src="https://doctr-static.mindee.com/models?id=v0.3.1/Logo_doctr.gif&src=0" width="60%">
8
+ </p>
9
+
10
+ **Optical Character Recognition made seamless & accessible to anyone, powered by TensorFlow 2 & PyTorch**
11
+
12
+ ## Task: recognition
13
+
14
+ https://github.com/mindee/doctr
15
+
16
+ ### Example usage:
17
+
18
+ ```python
19
+ >>> from doctr.io import DocumentFile
20
+ >>> from doctr.models import ocr_predictor, from_hub
21
+
22
+ >>> img = DocumentFile.from_images(['<image_path>'])
23
+ >>> # Load your model from the hub
24
+ >>> model = from_hub('mindee/my-model')
25
+
26
+ >>> # Pass it to the predictor
27
+ >>> # If your model is a recognition model:
28
+ >>> predictor = ocr_predictor(det_arch='db_mobilenet_v3_large',
29
+ >>> reco_arch=model,
30
+ >>> pretrained=True)
31
+
32
+ >>> # If your model is a detection model:
33
+ >>> predictor = ocr_predictor(det_arch=model,
34
+ >>> reco_arch='crnn_mobilenet_v3_small',
35
+ >>> pretrained=True)
36
+
37
+ >>> # Get your predictions
38
+ >>> res = predictor(img)
39
+ ```
40
+ ### Run Configuration
41
+
42
+ {
43
+ "arch": "crnn_mobilenet_v3_large",
44
+ "train_path": "train",
45
+ "val_path": "val",
46
+ "train_samples": 1000,
47
+ "val_samples": 20,
48
+ "font": "FreeMono.ttf,FreeSans.ttf,FreeSerif.ttf",
49
+ "min_chars": 1,
50
+ "max_chars": 12,
51
+ "name": "crnn_mobilenet_v3_large_tifinagh",
52
+ "epochs": 1,
53
+ "batch_size": 64,
54
+ "device": null,
55
+ "input_size": 32,
56
+ "lr": 0.001,
57
+ "weight_decay": 0,
58
+ "workers": 2,
59
+ "resume": "crnn_mobilenet_v3_large_tifinagh.pt",
60
+ "vocab": "tamazight",
61
+ "test_only": false,
62
+ "show_samples": false,
63
+ "wb": true,
64
+ "push_to_hub": true,
65
+ "pretrained": false,
66
+ "sched": "cosine",
67
+ "amp": false,
68
+ "find_lr": false
69
+ }
config.json ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "mean": [
3
+ 0.694,
4
+ 0.695,
5
+ 0.693
6
+ ],
7
+ "std": [
8
+ 0.299,
9
+ 0.296,
10
+ 0.301
11
+ ],
12
+ "input_shape": [
13
+ 3,
14
+ 32,
15
+ 128
16
+ ],
17
+ "vocab": "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~°£€¥¢฿àâéèêëîïôùûüçÀÂÉÈÊËÎÏÔÙÛÜÇⴰⴱⴲⴳⴴⴵⴶⴷⴸⴹⴺⴻⴼⴽⴾⴿⵀⵁⵂⵃⵄⵅⵆⵇⵈⵉⵊⵋⵌⵍⵎⵏⵐⵑⵒⵓⵔⵕⵖⵗⵘⵙⵚⵛⵜⵝⵞⵟⵠⵡⵢⵣⵤⵥⵦⵧⵯ⵰⵿",
18
+ "url": "https://doctr-static.mindee.com/models?id=v0.3.1/crnn_mobilenet_v3_large_pt-f5259ec2.pt&src=0",
19
+ "arch": "crnn_mobilenet_v3_large",
20
+ "task": "recognition"
21
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ea0351def493c60921ecec05dc9357fb539516b6a55a93352517cd214cf47772
3
+ size 18334309