Spaces:
Running
johnpaulbin
committed on
Commit 5abd3c7 • 1 Parent(s): 5b4b496
Update app.py
app.py
CHANGED
@@ -2,143 +2,58 @@ from flask import Flask, request, jsonify
import asyncio
from hypercorn.asyncio import serve
from hypercorn.config import Config
-import torch.nn.functional as F
-from torch import nn
import os
os.environ['CURL_CA_BUNDLE'] = ''

-
-
-
-from sentence_transformers import SentenceTransformer
-sentencemodel = SentenceTransformer('johnpaulbin/toxic-gte-small-3')
-
-USE_GPU = False
-
-
-""" Use torchMoji to predict emojis from a single text input
-"""
-
-import numpy as np
-import emoji, json
-from torchmoji.global_variables import PRETRAINED_PATH, VOCAB_PATH
-from torchmoji.sentence_tokenizer import SentenceTokenizer
-from torchmoji.model_def import torchmoji_emojis
-import torch
-
-# Emoji map in emoji_overview.png
-EMOJIS = ":joy: :unamused: :weary: :sob: :heart_eyes: \
-:pensive: :ok_hand: :blush: :heart: :smirk: \
-:grin: :notes: :flushed: :100: :sleeping: \
-:relieved: :relaxed: :raised_hands: :two_hearts: :expressionless: \
-:sweat_smile: :pray: :confused: :kissing_heart: :heartbeat: \
-:neutral_face: :information_desk_person: :disappointed: :see_no_evil: :tired_face: \
-:v: :sunglasses: :rage: :thumbsup: :cry: \
-:sleepy: :yum: :triumph: :hand: :mask: \
-:clap: :eyes: :gun: :persevere: :smiling_imp: \
-:sweat: :broken_heart: :yellow_heart: :musical_note: :speak_no_evil: \
-:wink: :skull: :confounded: :smile: :stuck_out_tongue_winking_eye: \
-:angry: :no_good: :muscle: :facepunch: :purple_heart: \
-:sparkling_heart: :blue_heart: :grimacing: :sparkles:".split(' ')
-
-def top_elements(array, k):
-    ind = np.argpartition(array, -k)[-k:]
-    return ind[np.argsort(array[ind])][::-1]
-
-
-with open("vocabulary.json", 'r') as f:
-    vocabulary = json.load(f)
-
-st = SentenceTokenizer(vocabulary, 100)

-

-if USE_GPU:
-    emojimodel.to("cuda:0")

-def deepmojify(sentence, top_n=5, prob_only=False):
-    list_emojis = []
-    def top_elements(array, k):
-        ind = np.argpartition(array, -k)[-k:]
-        return ind[np.argsort(array[ind])][::-1]

-
-
-
-    tokenized = torch.tensor(tokenized).cuda()  # then convert to PyTorch tensor

-    prob = emojimodel.forward(tokenized)[0]
-    if not USE_GPU:
-        prob = torch.tensor(prob)
-    if prob_only:
-        return prob
-    emoji_ids = top_elements(prob.cpu().numpy(), top_n)
-    emojis = map(lambda x: EMOJIS[x], emoji_ids)
-    list_emojis.append(emoji.emojize(f"{' '.join(emojis)}", language='alias'))
-    # returning the emojis as a list named as list_emojis
-    return list_emojis, prob


-
-
-
-    nn.BatchNorm1d(300),  # Batch normalization
-
-    nn.Linear(300, 300),  # Increase the number of neurons
-    nn.ReLU(),
-    nn.BatchNorm1d(300),  # Batch normalization
-
-    nn.Linear(300, 200),  # Increase the number of neurons
-    nn.ReLU(),
-    nn.BatchNorm1d(200),  # Batch normalization
-
-    nn.Linear(200, 125),  # Increase the number of neurons
-    nn.ReLU(),
-    nn.BatchNorm1d(125),  # Batch normalization
-
-    nn.Linear(125, 2),
-    nn.Dropout(0.05)  # Dropout
-)
-
-model.load_state_dict(torch.load("large.pth", map_location=torch.device('cpu')))
-model.eval()
-
-@app.route('/infer', methods=['POST'])
-def translate():
-    data = request.get_json()

-
-
-
-
-

-
-
-    else:
-        output = "false"

-

-
-
-def translateverbose():
    data = request.get_json()

-
-
-
-    INPUT = torch.cat((probs, embedding))
-    output = F.softmax(model(INPUT.view(1, -1)), dim=1)

-    if
-
    else:
-
-
-    return output

-# Define more routes for other operations like download_model, etc.
if __name__ == "__main__":
    config = Config()
    config.bind = ["0.0.0.0:7860"]  # You can specify the host and port here
import asyncio
from hypercorn.asyncio import serve
from hypercorn.config import Config
import os
os.environ['CURL_CA_BUNDLE'] = ''

+def install_package():
+    command = "pip install git+https://github.com/johnpaulbin/googletranslate flask"
+    os.system(command)

+install_package()



+from googletranslate import translate
+import json
+import random


+app = Flask(__name__)

+@app.route('/', methods=['GET'])
+def home():
+    return "HI! Use /translate POST"

+# Load the JSON data into memory
+def load_json_data(file_path):
+    with open(file_path, 'r') as file:
+        data = json.load(file)
+    return data

+# Assuming your JSON structure is a list of dictionaries
+json_data = load_json_data('englishspanishpairs.json')

+@app.route('/spanish')
+def random_spanish_pair():
+    # Select a random English-Spanish pair
+    random_pair = random.choice(json_data)
+    return jsonify(random_pair)

+@app.route('/translate', methods=['POST'])
+def dotranslate():
    data = request.get_json()

+    txt = data.get('txt')
+    src = data.get('src', 'en')
+    dest = data.get('dest', 'es')

+    if txt:
+        translation = translate(txt, dest=dest, src=src)
+        return jsonify({'translation': translation}), 200
    else:
+        return jsonify({'error': 'No text provided'}), 400
+

if __name__ == "__main__":
    config = Config()
    config.bind = ["0.0.0.0:7860"]  # You can specify the host and port here
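The diff is truncated after the config.bind line on both sides, so the actual serving call is not shown; given the hypercorn imports, it is presumably something along the lines of asyncio.run(serve(app, config)). A minimal client sketch for the new endpoints, assuming the Space is reachable on the 0.0.0.0:7860 binding above and that the requests library is installed (the payload keys txt, src, and dest are the ones dotranslate() reads):

import requests

BASE = "http://localhost:7860"  # assumed host/port, matching config.bind above

# POST /translate with the keys read by dotranslate(): txt, src, dest
resp = requests.post(
    f"{BASE}/translate",
    json={"txt": "good morning", "src": "en", "dest": "es"},
)
print(resp.status_code, resp.json())  # expected: 200 {'translation': '...'}

# GET /spanish returns one random pair from englishspanishpairs.json
print(requests.get(f"{BASE}/spanish").json())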