Soumen committed on
Commit 7af9178 · 1 Parent(s): a96fccc

Update app.py

Files changed (1):
  1. app.py +16 -5
app.py CHANGED
@@ -180,11 +180,22 @@ def main():
     if st.checkbox("Spell Corrections for English"):
         st.success(TextBlob(text).correct())
     if st.checkbox("Text Generation"):
-        tokenizer, model = load_models()
-        input_ids = tokenizer(text, return_tensors='pt').input_ids
-        st.text("Using Hugging Face Transformer, Contrastive Search ..")
-        output = model.generate(input_ids, max_length=128)
-        st.success(tokenizer.decode(output[0], skip_special_tokens=True))
+        API_URL = "https://api-inference.huggingface.co/models/gpt2-large"
+        headers = {"Authorization": "Bearer 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"}
+
+        def query(payload):
+            response = requests.post(API_URL, headers=headers, json=payload)
+            return response.json()
+
+        output = query({
+            "inputs": text,
+        })
+        st.success(output)
+        # tokenizer, model = load_models()
+        # input_ids = tokenizer(text, return_tensors='pt').input_ids
+        # st.text("Using Hugging Face Transformer, Contrastive Search ..")
+        # output = model.generate(input_ids, max_length=128)
+        # st.success(tokenizer.decode(output[0], skip_special_tokens=True))
     # if st.checkbox("Mark here, Text Summarization for English or Bangla!"):
     # st.subheader("Summarize Your Text for English and Bangla Texts!")
     # message = st.text_area("Enter the Text","Type please ..")
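Editor's note: the new code path calls the hosted Hugging Face Inference API instead of running a local model, but it hardcodes a bearer token directly in app.py (and assumes `requests` is imported elsewhere in the file). A minimal sketch of the same call with the token read from an environment variable; the `HF_API_TOKEN` name, the error handling, and the response shape shown are assumptions, not part of this commit:

    import os
    import requests

    API_URL = "https://api-inference.huggingface.co/models/gpt2-large"
    # Token comes from the environment; HF_API_TOKEN is an assumed name.
    headers = {"Authorization": f"Bearer {os.environ['HF_API_TOKEN']}"}

    def query(payload):
        response = requests.post(API_URL, headers=headers, json=payload)
        response.raise_for_status()  # fail loudly on HTTP errors
        return response.json()

    output = query({"inputs": "Streamlit is"})
    print(output)  # text-generation models typically return [{"generated_text": "..."}]

In a Streamlit app, `st.secrets` is another common place to keep such a token out of the source.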
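Incidentally, the removed (now commented-out) call `model.generate(input_ids, max_length=128)` decodes greedily by default, despite the "Contrastive Search" status message; in transformers, contrastive search is enabled by passing `penalty_alpha` together with `top_k`. A minimal sketch of that local path, with assumed model name and parameter values:

    from transformers import AutoModelForCausalLM, AutoTokenizer

    # gpt2-large matches the API model above; penalty_alpha/top_k are
    # illustrative values, not taken from this commit.
    tokenizer = AutoTokenizer.from_pretrained("gpt2-large")
    model = AutoModelForCausalLM.from_pretrained("gpt2-large")

    input_ids = tokenizer("Streamlit is", return_tensors="pt").input_ids
    # penalty_alpha > 0 together with top_k > 1 enables contrastive search
    output = model.generate(input_ids, penalty_alpha=0.6, top_k=4, max_length=128)
    print(tokenizer.decode(output[0], skip_special_tokens=True))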