PRITHVI-V committed on
Commit
4a305fc
·
verified ·
1 Parent(s): c68a3e9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -17
app.py CHANGED
@@ -14,14 +14,6 @@ hf_token = os.getenv("HF_TOK") # Set your Hugging Face token in your environmen
14
  login(hf_token) # Log in using the Hugging Face token
15
 
16
  model = genai.GenerativeModel("gemini-1.5-flash")
17
- diffusion_model = InferenceApi(repo_id="black-forest-labs/FLUX.1-schnell")
18
-
19
- def generate_response(tamil_text):
20
- translated_text = translate_text(tamil_text)
21
- creative_result = model.generate_content('poem about'+translated_text)
22
- response = diffusion_model(inputs=translated_text,
23
- params={"guidance_scale": 7.5, "num_inference_steps": 50})
24
- return translated_text, creative_result.text, response
25
 
26
  # Load the pre-trained model for Tamil to English translation
27
  translator_model_name = "Helsinki-NLP/opus-mt-mul-en"
@@ -35,21 +27,22 @@ def translate_text(tamil_text):
35
  return tokenizer.decode(translated[0], skip_special_tokens=True)
36
 
37
  def generate_creative_writing(english_text):
38
- # Placeholder for creative writing generation
39
- # Replace with your preferred model or API call
40
- return model.generate_content('poem about'+english_text).text
41
 
42
  def generate_image(prompt):
43
  # Make an inference request to generate an image
44
  response = diffusion_model(inputs=prompt, params={"guidance_scale": 7.5, "num_inference_steps": 50})
45
- return response # Assuming the response is a URL to the generated image
46
 
47
  def process_input(tamil_text):
48
- # Process input for translation, creative writing, and image generation
49
- translated_text = translate_text(tamil_text)
50
- creative_response = generate_creative_writing(translated_text)
51
- generated_image = generate_image(translated_text)
52
- return translated_text, creative_response, generated_image
 
 
53
 
54
  # Create a Gradio interface
55
  iface = gr.Interface(
@@ -62,3 +55,4 @@ iface = gr.Interface(
62
 
63
  # Launch the Gradio app
64
  iface.launch()
 
 
14
  login(hf_token) # Log in using the Hugging Face token
15
 
16
  model = genai.GenerativeModel("gemini-1.5-flash")
 
 
 
 
 
 
 
 
17
 
18
  # Load the pre-trained model for Tamil to English translation
19
  translator_model_name = "Helsinki-NLP/opus-mt-mul-en"
 
27
  return tokenizer.decode(translated[0], skip_special_tokens=True)
28
 
29
def generate_creative_writing(english_text):
    """Generate a short poem about *english_text* with the Gemini model.

    Parameters
    ----------
    english_text : str
        English text (the translated Tamil input) to base the poem on.

    Returns
    -------
    str
        The generated poem text.
    """
    # BUG FIX: the previous line quoted the whole call —
    #   return 'model.generate_content('poem about ' + english_text).text'
    # which is a syntax error from the nested quotes and, even if quoted
    # consistently, would return the source string instead of calling the model.
    return model.generate_content('poem about ' + english_text).text
 
32
 
33
def generate_image(prompt):
    """Request a generated image for *prompt* from the diffusion endpoint.

    Returns whatever the inference API responds with for the prompt.
    """
    # NOTE(review): this commit appears to have removed the line that
    # created `diffusion_model` — confirm it is still defined at module level,
    # otherwise this raises NameError at call time.
    request_params = {"guidance_scale": 7.5, "num_inference_steps": 50}
    return diffusion_model(inputs=prompt, params=request_params)
37
 
38
def process_input(tamil_text):
    """Pipeline entry point: translate Tamil input, then produce a poem and image.

    Returns a 3-tuple ``(translated_text, creative_text, image)``. If any step
    raises, the first element is the error message, the second a fixed error
    string, and the image slot is ``None`` — so the Gradio UI shows the failure
    instead of crashing.
    """
    try:
        english = translate_text(tamil_text)
        poem = generate_creative_writing(english)
        image = generate_image(english)
    except Exception as error:  # boundary handler: surface errors in the UI
        return str(error), "Error occurred during processing", None
    return english, poem, image
46
 
47
  # Create a Gradio interface
48
  iface = gr.Interface(
 
55
 
56
  # Launch the Gradio app
57
  iface.launch()
58
+