Update app.py
Browse files
app.py
CHANGED
@@ -180,11 +180,22 @@ def main():
|
|
180 |
if st.checkbox("Spell Corrections for English"):
|
181 |
st.success(TextBlob(text).correct())
|
182 |
if st.checkbox("Text Generation"):
|
183 |
-
|
184 |
-
|
185 |
-
|
186 |
-
|
187 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
188 |
# if st.checkbox("Mark here, Text Summarization for English or Bangla!"):
|
189 |
# st.subheader("Summarize Your Text for English and Bangla Texts!")
|
190 |
# message = st.text_area("Enter the Text","Type please ..")
|
|
|
180 |
if st.checkbox("Spell Corrections for English"):
|
181 |
st.success(TextBlob(text).correct())
|
182 |
if st.checkbox("Text Generation"):
|
183 |
+
API_URL = "https://api-inference.huggingface.co/models/gpt2-large"
|
184 |
+
headers = {"Authorization": "Bearer hf_********"}  # SECURITY(review): a real Hugging Face token was committed on this line — it is now public and must be revoked; load it at runtime, e.g. os.environ["HF_API_TOKEN"], never hardcode it
|
185 |
+
|
186 |
+
def query(payload):
|
187 |
+
response = requests.post(API_URL, headers=headers, json=payload)
|
188 |
+
return response.json()
|
189 |
+
|
190 |
+
output = query({
|
191 |
+
"inputs": text,
|
192 |
+
})
|
193 |
+
st.success(output)
|
194 |
+
# tokenizer, model = load_models()
|
195 |
+
# input_ids = tokenizer(text, return_tensors='pt').input_ids
|
196 |
+
# st.text("Using Hugging Face Transformer, Contrastive Search ..")
|
197 |
+
# output = model.generate(input_ids, max_length=128)
|
198 |
+
#st.success(tokenizer.decode(output[0], skip_special_tokens=True))
|
199 |
# if st.checkbox("Mark here, Text Summarization for English or Bangla!"):
|
200 |
# st.subheader("Summarize Your Text for English and Bangla Texts!")
|
201 |
# message = st.text_area("Enter the Text","Type please ..")
|