import re

from flask import Flask, render_template, request
from nltk.tokenize import word_tokenize
from nltk.corpus import wordnet
from nltk import pos_tag, ne_chunk
import textblob
from polyglot.detect import Detector
import numpy as np
from keras.models import load_model

app = Flask(__name__)
# Pre-trained Keras emotion classifier shipped alongside the app.
model = load_model("emotion_detector.h5")

@app.route("/")
def index():
    return render_template("index.html")

@app.route("/paraphrase", methods=["POST"])  # route path is an assumption; match it to the form action in index.html
def paraphrase():
    input_text = request.form["input_text"]
    options = request.form.getlist("options")
    # Remove special characters
    if "remove_special_characters" in options:
        input_text = remove_special_characters(input_text)
    # Correct grammar
    if "correct_grammar" in options:
        input_text = correct_grammar(input_text)
    # Summarize text
    if "summarize_text" in options:
        input_text = summarize_text(input_text)
    # Multilingual support
    target_language = request.form.get("target_language")
    if target_language:
        input_text = translate(input_text, target_language)
    # Custom synonyms: each form entry is assumed to be a "word:synonym" pair
    # (getlist returns plain strings, so they must be parsed before unpacking)
    custom_synonyms = request.form.getlist("custom_synonyms")
    for pair in custom_synonyms:
        word, _, synonym = pair.partition(":")
        if synonym:
            input_text = replace_word(input_text, word, synonym)
    # Output customization
    input_text = customise_output(input_text, options)
    # Integration with other NLP tools
    named_entities = get_named_entities(input_text)
    part_of_speech = get_part_of_speech(input_text)
    sentiment = get_sentiment(input_text)
    # Emotion detection, then tone adjustment based on the detected emotion
    emotion = detect_emotion(input_text)
    input_text = adjust_tone(input_text, emotion)
    return render_template(
        "index.html",
        paraphrased_text=input_text,
        named_entities=named_entities,
        part_of_speech=part_of_speech,
        sentiment=sentiment,
    )

def remove_special_characters(input_text):
    # Minimal implementation of the original placeholder:
    # keep letters, digits, and whitespace; drop everything else.
    return re.sub(r"[^A-Za-z0-9\s]", "", input_text)

def summarize_text(input_text):
    # Placeholder: no summariser is wired in yet, so the text is returned unchanged.
    return input_text

def detect_language(input_text):
    # polyglot's detector returns an ISO 639-1 code such as "en" or "fr".
    detector = Detector(input_text)
    language = detector.language.code
    return language

def translate(input_text, target_language):
    # Note: TextBlob's translate() wraps the Google Translate API and has been
    # deprecated and removed in recent TextBlob releases; this requires an older
    # textblob version (or a different translation backend).
    blob = textblob.TextBlob(input_text)
    translated_text = str(blob.translate(to=target_language))
    return translated_text

def get_synonyms(word):
    # Collect every lemma name from every WordNet synset for the word.
    synonyms = []
    for syn in wordnet.synsets(word):
        for lemma in syn.lemmas():
            synonyms.append(lemma.name())
    return synonyms

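# Example (illustrative): get_synonyms("happy") yields WordNet lemma names such
# as ["happy", "felicitous", "glad", "well-chosen"], depending on the installed
# WordNet data.
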
def replace_word(input_text, word, synonym):
    # Token-level replacement; re-joining with spaces also puts spaces around
    # punctuation, which slightly alters the original formatting.
    words = word_tokenize(input_text)
    words = [synonym if w == word else w for w in words]
    input_text = " ".join(words)
    return input_text

def customise_output(input_text, options):
    # Placeholder: output customisation is not implemented yet.
    return input_text

def get_named_entities(input_text):
    named_entities = ne_chunk(pos_tag(word_tokenize(input_text)))
    return named_entities

def get_part_of_speech(input_text):
    pos = pos_tag(word_tokenize(input_text))
    return pos

def get_sentiment(input_text):
    blob = textblob.TextBlob(input_text)
    # TextBlob polarity ranges from -1.0 (negative) to 1.0 (positive).
    sentiment = blob.sentiment.polarity
    return sentiment

def correct_grammar(input_text):
    blob = textblob.TextBlob(input_text)
    corrected_text = str(blob.correct())
    return corrected_text

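# Illustrative example from the TextBlob docs:
#   correct_grammar("I havv goood speling!") -> "I have good spelling!"
# Note that correct() does spelling correction, not full grammar correction.
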
def detect_emotion(input_text):
    # Lowercase, keep alphabetic tokens only, and re-join before prediction.
    words = word_tokenize(input_text)
    words = [w.lower() for w in words]
    words = [w for w in words if w.isalpha()]
    input_text = " ".join(words)
    # Assumes the saved model accepts raw strings (e.g. it starts with a
    # TextVectorization layer); otherwise the text must be tokenised and
    # padded to the model's expected input shape first.
    input_text = np.array([input_text])
    emotion = model.predict(input_text, batch_size=1, verbose=0)[0]
    return emotion

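# Illustrative sketch (not part of the original app): detect_emotion() returns a
# probability vector, so downstream code such as adjust_tone() may want a label.
# EMOTION_LABELS is hypothetical; use the class order the model was trained with.
EMOTION_LABELS = ["anger", "fear", "joy", "sadness"]

def emotion_label(prediction):
    # Pick the class with the highest predicted probability.
    return EMOTION_LABELS[int(np.argmax(prediction))]
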
def adjust_tone(input_text, emotion):
    # Placeholder: tone adjustment based on the detected emotion is not implemented yet.
    return input_text

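# Example request against the assumed /paraphrase route (start the app first):
#   curl -X POST http://localhost:7860/paraphrase \
#        -d "input_text=Ths is a tst" \
#        -d "options=correct_grammar"
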
if __name__ == "__main__":
    app.run(debug=True, port=7860, host="0.0.0.0")