Soumen committed on
Commit c9a18bc
Parent: ad33706

Update app.py

Files changed (1): app.py (+8 -8)
app.py CHANGED
@@ -52,8 +52,8 @@ import line_cor
 import altair as alt
 #pytesseract.pytesseract.tesseract_cmd = r"./Tesseract-OCR/tesseract.exe"
 from PIL import Image
-#@st.experimental_singleton
-@st.cache_resource(experimental_allow_widgets=True)
+@st.experimental_singleton
+#@st.cache_resource(experimental_allow_widgets=True)
 def read_pdf(file):
     # images=pdf2image.convert_from_path(file)
     # # print(type(images))
@@ -87,23 +87,23 @@ def read_pdf(file):
     # all_page_text += text + " " #page.extractText()
     # return all_page_text
 st.title("NLP APPLICATION")
-#@st.experimental_singleton
-@st.cache_resource(experimental_allow_widgets=True)
+@st.experimental_singleton
+#@st.cache_resource(experimental_allow_widgets=True)
 def text_analyzer(my_text):
     nlp = spacy.load('en_core_web_sm')
     docx = nlp(my_text)
     # tokens = [ token.text for token in docx]
     allData = [('"Token":{},\n"Lemma":{}'.format(token.text,token.lemma_))for token in docx ]
     return allData
-#@st.experimental_singleton
-@st.cache_resource(experimental_allow_widgets=True)
+@st.experimental_singleton
+#@st.cache_resource(experimental_allow_widgets=True)
 def load_models():
     tokenizer = AutoTokenizer.from_pretrained('gpt2-large')
     model = GPT2LMHeadModel.from_pretrained('gpt2-large')
     return tokenizer, model
 # Function For Extracting Entities
-#@st.experimental_singleton
-@st.cache_resource(experimental_allow_widgets=True)
+@st.experimental_singleton
+#@st.cache_resource(experimental_allow_widgets=True)
 def entity_analyzer(my_text):
     nlp = spacy.load('en_core_web_sm')
     docx = nlp(my_text)
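
For context, a minimal sketch of the singleton-caching pattern this commit reverts to, assuming a Streamlit version that still ships st.experimental_singleton (it was deprecated in favor of st.cache_resource in Streamlit 1.18). The function name load_nlp and the sample sentence are illustrative, not part of app.py:

import spacy
import streamlit as st

@st.experimental_singleton  # on Streamlit >= 1.18, use @st.cache_resource instead
def load_nlp():
    # Loading a spaCy pipeline is expensive; the decorator runs this once
    # per process and hands every later call the same cached object.
    return spacy.load('en_core_web_sm')

nlp = load_nlp()
st.write([token.text for token in nlp("Hello world")])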