ntam0001 committed
Commit dea2cef
1 Parent(s): acb75f7

Update app.py

Files changed (1)
  1. app.py +24 -21
app.py CHANGED
@@ -1,41 +1,44 @@
 import gradio as gr
 from transformers import MarianMTModel, MarianTokenizer
 
+# Assuming the environment is set up for GPU use if available
+# This is more about the environment setup than code modification
+
 # Define a function that loads a model and tokenizer based on the chosen language
 def load_model(lang_pair):
-    if lang_pair == "English to French":
-        model_name = 'Helsinki-NLP/opus-mt-en-fr'
-    elif lang_pair == "Kinyarwanda to English":
-        model_name = 'Helsinki-NLP/opus-mt-rw-en'
-    else:
-        raise ValueError("Unsupported language pair")
+    model_name = {
+        "English to French": 'Helsinki-NLP/opus-mt-en-fr',
+        "Kinyarwanda to English": 'Helsinki-NLP/opus-mt-rw-en'
+    }[lang_pair]
 
     tokenizer = MarianTokenizer.from_pretrained(model_name)
     model = MarianMTModel.from_pretrained(model_name)
     return model, tokenizer
 
+# Example function that could be used for caching (conceptual implementation)
+cache = {}
+def get_translation_from_cache_or_model(model, tokenizer, text):
+    if text in cache:
+        return cache[text]
+    model_inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
+    gen = model.generate(**model_inputs)
+    translation = tokenizer.batch_decode(gen, skip_special_tokens=True)[0]
+    cache[text] = translation
+    return translation
+
 # Function to translate text based on selected language
 def translate(lang_pair, text):
     model, tokenizer = load_model(lang_pair)
-
-    # Tokenize the text using the __call__ method
-    model_inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
-
-    # Perform the translation
-    gen = model.generate(**model_inputs)
-
-    # Decode the generated tokens to string
-    translation = tokenizer.batch_decode(gen, skip_special_tokens=True)
-    return translation[0]
+    # Use the caching function
+    translation = get_translation_from_cache_or_model(model, tokenizer, text)
+    return translation
 
 # Create a Gradio interface with a dropdown menu for language selection
 iface = gr.Interface(
     fn=translate,
-    inputs=[
-        gr.Dropdown(choices=["English to French", "Kinyarwanda to English"], label="Select Language"),
-        gr.Textbox(lines=2, placeholder="Enter Message...")
-    ],
-    outputs=gr.Textbox()
+    inputs=[gr.Dropdown(choices=["English to French", "Kinyarwanda to English"], label="Select Language Pair"),
+            gr.Textbox(lines=2, placeholder="Enter Text...")],
+    outputs=gr.Textbox(label="Translation")
 )
 
 # Launch the interface
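
Note on the new GPU comment: the committed code never actually moves the model or its inputs onto a device, so the comment only records an assumption about the environment. A minimal sketch of explicit device placement, not part of this commit (device and translate_on_device are illustrative names), could look like:

import torch
from transformers import MarianMTModel, MarianTokenizer

# Use the GPU when one is visible to the process, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model_name = 'Helsinki-NLP/opus-mt-en-fr'
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name).to(device)  # move the weights to the chosen device

def translate_on_device(text):
    # Input tensors must sit on the same device as the model before generate() runs.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True).to(device)
    gen = model.generate(**inputs)
    return tokenizer.batch_decode(gen, skip_special_tokens=True)[0]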
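
A related observation: translations are now cached, but load_model still reloads the tokenizer and model weights on every call to translate. The same memoisation idea could be applied to model loading; this is only a sketch (load_model_cached and MODEL_NAMES are hypothetical helpers, not in the commit):

from functools import lru_cache
from transformers import MarianMTModel, MarianTokenizer

MODEL_NAMES = {
    "English to French": 'Helsinki-NLP/opus-mt-en-fr',
    "Kinyarwanda to English": 'Helsinki-NLP/opus-mt-rw-en',
}

@lru_cache(maxsize=None)
def load_model_cached(lang_pair):
    # Load each language pair once per process and reuse it on later requests.
    model_name = MODEL_NAMES[lang_pair]
    return MarianMTModel.from_pretrained(model_name), MarianTokenizer.from_pretrained(model_name)

Swapping this in for load_model would leave the body of translate unchanged.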