Pavel Malov: Add old caching (489af12)
import streamlit as st

from inference import InferenceModel

st.set_page_config(layout="wide")
st.title("ArxivTopicPicker")
st.write("This app helps determine the category of your scientific paper based on its name and abstract.")

name = st.text_input("Paste the name of your paper here")
abstract = st.text_area("Paste the abstract of your paper here")

@st.cache  # πŸ‘ˆ Add the caching decorator (legacy API; newer Streamlit versions use st.cache_resource)
def load_model():
    # Construct the model once and reuse it across script reruns.
    return InferenceModel()

model = load_model()
model.inference('load')  # warm-up call, presumably so the model is loaded before the first real request

# if name != '':
# st.text("Your paper:\n\tName: " + name + '.\n\tAbstract: ' + abstract)

if st.button("Start processing"):
    if name == '':
        st.write(
            '<p style="font-family:sans-serif; color:Red; font-size: 21px;">Please provide the name of the paper!πŸ™‡β€β™‚οΈ</p>',
            unsafe_allow_html=True,
        )
    else:
        # The model takes a single string: "Name. Abstract" (or just "Name." when no abstract is given).
        input_text = name + '. ' + abstract if abstract != '' else name + '.'
        top_topics = model.inference(input_text)
        if len(top_topics) == 0:
            st.text("We don't know yet😰")
        else:
            st.text(top_topics)
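
The inference module itself is not part of this commit. As a rough sketch only, the stub below mirrors the interface the app relies on: InferenceModel() is constructed with no arguments, and inference(text) returns a (possibly empty) list of topic labels. The file name, the keyword table, and the category strings are hypothetical placeholders, not the author's model.

# inference_stub.py: a minimal sketch, assuming only the interface used above.
# InferenceModel() takes no arguments; inference(text) returns a list of
# topic labels (empty when nothing matches). The keyword table below is a
# made-up placeholder, not the real classifier.
class InferenceModel:
    _KEYWORD_TO_CATEGORY = {
        "neural": "cs.LG",
        "language": "cs.CL",
        "image": "cs.CV",
        "quantum": "quant-ph",
        "galaxy": "astro-ph.GA",
    }

    def inference(self, text):
        # Lower-case the input and collect every category whose keyword occurs in it.
        text = text.lower()
        return [cat for kw, cat in self._KEYWORD_TO_CATEGORY.items() if kw in text]

With the real inference.py (or a stand-in like the one above) alongside it, the app starts with "streamlit run app.py", assuming the file shown in this commit is saved as app.py.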