import streamlit as st
import PyPDF2
import os
import re
import string
import nltk

# Earlier approach (superseded by the combined-dictionary logic below):
# the English dictionary was a Hunspell word list loaded from "index.dic"
# rather than NLTK words plus the pyspellchecker frequency list.

from nltk.stem import WordNetLemmatizer, PorterStemmer
from nltk.corpus import words
import spacy
from spellchecker import SpellChecker

# Download necessary NLTK resources
nltk.download('wordnet')
nltk.download('words')

# Initialize tools
lemmatizer = WordNetLemmatizer()
stemmer = PorterStemmer()
english_words = set(words.words())
nlp = spacy.load("en_core_web_sm")  # spaCy English model (see disabled step 5 below)
spell = SpellChecker()  # Spell checker

# Combine dictionaries for better coverage
combined_dictionary = english_words.union(spell.word_frequency.keys())

def is_english_word(word):
    """
    Checks if a word is English and returns the valid English word or None if not recognized.
    """
    # Preprocess the word: strip punctuation and lowercase
    word_cleaned = word.lower().strip(string.punctuation)
    if not word_cleaned:
        return None

    # 1. Direct dictionary match
    if word_cleaned in combined_dictionary:
        return word_cleaned

    # 2. Lemmatization
    lemma = lemmatizer.lemmatize(word_cleaned)
    if lemma in combined_dictionary:
        return lemma

    # 3. Stemming
    stem = stemmer.stem(word_cleaned)
    if stem in combined_dictionary:
        return stem

    # 4. Spell checker (correction() may return None for unknown words,
    # which simply fails the membership test below)
    corrected_word = spell.correction(word_cleaned)
    if corrected_word in combined_dictionary:
        return corrected_word

    # 5. (Disabled) spaCy language check. token.lang_ always reports the
    # model's language ("en") and token.is_alpha passes any Latin-letter
    # string, so this step misclassified most Preeti-encoded words as
    # English and prevented their conversion:
    # doc = nlp(word_cleaned)
    # if doc and doc[0].is_alpha and doc[0].lang_ == "en":
    #     return word_cleaned

    return None
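
# Illustrative behaviour (hedged: exact results depend on the installed
# NLTK corpora and the pyspellchecker frequency list):
#   is_english_word("Running")  -> "running" (direct match) or "run" (stem)
#   is_english_word("recieve")  -> "receive" (spell-checker correction)
#   is_english_word("g]kfn")    -> None, so the caller hands it to convert()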



# Devanagari digits and patterns for matching
DEVANAGARI_DIGITS = {'०', '१', '२', '३', '४', '५', '६', '७', '८', '९'}  # single digits only
DEVANAGARI_PATTERN = re.compile(r'^[०-९]+(?:[.,/-][०-९]+)*$')  # Devanagari digit runs with separators
NUMERIC_PATTERN = re.compile(r'^\d+(?:[.,/]\d+)*$')  # ASCII digit runs with separators

# Unicode conversion mappings
unicodeatoz = ["ब", "द", "अ", "म", "भ", "ा", "न", "ज", "ष्", "व", "प", "ि", "फ", "ल", "य", "उ", "त्र", "च", "क", "त", "ग", "ख", "ध", "ह", "थ", "श"]
unicodeAtoZ = ["ब्", "ध", "ऋ", "म्", "भ्", "ँ", "न्", "ज्", "क्ष्", "व्", "प्", "ी", "ः", "ल्", "इ", "ए", "त्त", "च्", "क्", "त्", "ग्", "ख्", "ध्", "ह्", "थ्", "श्"]
unicode0to9 = ["ण्", "ज्ञ", "द्द", "घ", "द्ध", "छ", "ट", "ठ", "ड", "ढ"]
symbolsDict = {
    "~": "ञ्", "`": "ञ", "!": "१", "@": "२", "#": "३", "$": "४", "%": "५", "^": "६", "&": "७", "*": "८", "(": "९",
    ")": "०", "-": "(", "_": ")", "+": "ं", "[": "ृ", "{": "र्", "]": "े", "}": "ै", "\\": "्", "|": "्र", ";": "स",
    ":": "स्", "'": "ु", "\"": "ू", ",": ",", "<": "?", ".": "।", ">": "श्र", "/": "र", "?": "रु", "=": ".",
    "ˆ": "फ्", "Î": "ङ्ख", "å": "द्व", "÷": "/"
}

def normalizePreeti(preetitxt):
    """Reorder and pre-substitute Preeti sequences before character mapping."""
    normalized = ''
    previoussymbol = ''
    # Replace multi-character Preeti sequences that map to single Unicode forms
    preetitxt = preetitxt.replace('qm', 's|')
    preetitxt = preetitxt.replace('f]', 'ो')
    preetitxt = preetitxt.replace('km', 'फ')
    preetitxt = preetitxt.replace('0f', 'ण')
    preetitxt = preetitxt.replace('If', 'क्ष')
    preetitxt = preetitxt.replace('if', 'ष')
    preetitxt = preetitxt.replace('cf', 'आ')
    index = -1
    while index + 1 < len(preetitxt):
        index += 1
        character = preetitxt[index]
        try:
            # '{' encodes the reph (र्), typed after the vowel in Preeti but
            # written before the cluster in Unicode, so hoist it forward
            if preetitxt[index + 2] == '{':
                if preetitxt[index + 1] == 'f' or preetitxt[index + 1] == 'ो':
                    normalized += '{' + character + preetitxt[index + 1]
                    index += 2
                    continue
            if preetitxt[index + 1] == '{':
                if character != 'f':
                    normalized += '{' + character
                    index += 1
                    continue
        except IndexError:
            pass
        if character == 'l':
            # Preeti types the short i-matra ('l' -> ि) before its consonant;
            # Unicode places it after, so defer it by one character
            previoussymbol = 'l'
            continue
        else:
            normalized += character + previoussymbol
            previoussymbol = ''
    return normalized
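
# Example of the i-matra reordering (illustrative): in Preeti, "ls" renders
# as कि with the matra typed first, so normalization swaps the pair:
#   normalizePreeti("ls") -> "sl"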

def convert(preeti):
    """Map normalized Preeti characters to Unicode Devanagari."""
    converted = ''
    normalizedpreeti = normalizePreeti(preeti)
    for character in normalizedpreeti:
        try:
            if 'a' <= character <= 'z':
                converted += unicodeatoz[ord(character) - ord('a')]
            elif 'A' <= character <= 'Z':
                converted += unicodeAtoZ[ord(character) - ord('A')]
            elif '0' <= character <= '9':
                converted += unicode0to9[ord(character) - ord('0')]
            else:
                converted += symbolsDict[character]
        except KeyError:
            # Unmapped characters (whitespace, already-Unicode text) pass through
            converted += character

    return converted
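
# Sanity check (hedged: based on the mapping tables above): the classic
# Preeti sample "g]kfn" maps g->न, ]->े, k->प, f->ा, n->ल, i.e.
#   convert("g]kfn") -> "नेपाल"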


def is_valid_numeric(word):
    """Check if the word is a valid numeric string."""
    return bool(NUMERIC_PATTERN.match(word))

def is_devanagari_digit(word):
    """Check if the word contains only Devanagari digits."""
    return bool(DEVANAGARI_PATTERN.match(word))
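
# Pattern behaviour (illustrative):
#   is_devanagari_digit("१२३")  -> True
#   is_devanagari_digit("१२.५") -> True   (separator between digit runs)
#   is_devanagari_digit("123")  -> False  (ASCII digits hit NUMERIC_PATTERN instead)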

def process_text_word_by_word(page_text):
    """Process each word and retain or convert based on language."""
    processed_text = []
    words_in_page = page_text.split()

    for word in words_in_page:
        word_cleaned = word.strip(string.punctuation)
        if is_english_word(word_cleaned):
            processed_text.append(word)  # Retain English words
        elif is_devanagari_digit(word_cleaned):
            processed_text.append(word)  # Retain Devanagari digits
        elif is_valid_numeric(word_cleaned):
            processed_text.append(word)  # Retain numeric expressions
        else:
            processed_text.append(convert(word))  # Convert other words
    
    return ' '.join(processed_text)
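
# Worked example (hedged: actual output depends on the dictionaries loaded
# above): "Nepal" should be kept as English, "2024" and "२०८१" match the
# numeric patterns, and the Preeti word "g]kfn" falls through to convert():
#   process_text_word_by_word("Nepal g]kfn 2024 २०८१")
#   -> "Nepal नेपाल 2024 २०८१"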

def text_both_english_and_nepali(pdf_file):
    """Extract and process text from each page of a PDF."""
    text = ""

    reader = PyPDF2.PdfReader(pdf_file)
    for page_num, page in enumerate(reader.pages):
        page_text = page.extract_text() or ""  # extract_text() can return None
        processed_text = process_text_word_by_word(page_text)
        text += f"\nPage {page_num + 1}:\n{processed_text}"
    return text

def main():
    st.title("Advanced PDF/TXT to Unicode Converter")

    uploaded_file = st.file_uploader("Upload a PDF or TXT file", type=["pdf", "txt"])

    if uploaded_file is not None:
        text = ""
        file_extension = os.path.splitext(uploaded_file.name)[1].lower()

        if file_extension == ".pdf":
            text = text_both_english_and_nepali(uploaded_file)
        elif file_extension == ".txt":
            text = process_text_word_by_word(uploaded_file.getvalue().decode("utf-8"))

        st.subheader("Processed Text")
        st.text_area("Processed text", value=text, height=400, label_visibility="collapsed")

        # Download button for the processed text
        st.download_button(
            label="Download Processed Text",
            data=text.encode("utf-8"),
            file_name="processed_text.txt",
            mime="text/plain"
        )

if __name__ == "__main__":
    main()
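
# To run the app locally (hedged: assumes this file is saved as app.py and
# that the spaCy model was installed, e.g. via
# `python -m spacy download en_core_web_sm`):
#   streamlit run app.py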