# Hugging Face Space: Tamil-to-English translation, poem generation, and
# image generation demo (Gemini + MarianMT + FLUX.1-schnell).
import os

import google.generativeai as genai
import gradio as gr
import torch
from huggingface_hub import InferenceApi, login
from transformers import MarianMTModel, MarianTokenizer

# --- Google Gemini configuration ---
# BUG FIX: assigning os.getenv(...) directly into os.environ raised an opaque
# TypeError when GOOGLE_API_KEY was unset (environ values must be str).
# Fail fast with a clear message instead.
api_key = os.getenv("GOOGLE_API_KEY")
if not api_key:
    raise RuntimeError("GOOGLE_API_KEY environment variable is not set")
os.environ["API_KEY"] = api_key  # kept for any code that reads API_KEY
genai.configure(api_key=api_key)

# --- Hugging Face authentication ---
hf_token = os.getenv("HF_TOK")
if not hf_token:
    raise RuntimeError("HF_TOK environment variable is not set")
login(hf_token)  # authenticate so the Inference API accepts our requests

# Text-to-image model served through the HF Inference API.
# NOTE(review): InferenceApi is deprecated in recent huggingface_hub
# releases — consider migrating to InferenceClient.text_to_image.
diffusion_model = InferenceApi(repo_id="black-forest-labs/FLUX.1-schnell")

# Gemini model used for the creative-writing step.
model = genai.GenerativeModel("gemini-1.5-flash")

# Multilingual -> English translation model (covers Tamil).
translator_model_name = "Helsinki-NLP/opus-mt-mul-en"
tokenizer = MarianTokenizer.from_pretrained(translator_model_name)
translator = MarianMTModel.from_pretrained(translator_model_name)
def translate_text(tamil_text):
    """Translate Tamil (or other supported-language) text to English.

    Args:
        tamil_text: Source-language text to translate.

    Returns:
        The English translation as a single string.
    """
    inputs = tokenizer(tamil_text, return_tensors="pt", padding=True)
    # Inference only: disable autograd tracking to save memory and compute.
    with torch.no_grad():
        translated = translator.generate(**inputs)
    return tokenizer.decode(translated[0], skip_special_tokens=True)
def generate_creative_writing(english_text):
    """Ask Gemini to write a poem about the given English text.

    Args:
        english_text: Topic text (typically the translated user input).

    Returns:
        The generated poem as a string.
    """
    # BUG FIX: the original built the prompt as "poem about" + english_text,
    # which ran the words together (e.g. "poem aboutsunset").
    return model.generate_content(f"poem about {english_text}").text
def generate_image(prompt):
    """Generate an image from an English prompt via the HF Inference API.

    Args:
        prompt: Text description of the desired image.

    Returns:
        The Inference API response for this model — presumably a PIL image
        for a text-to-image pipeline; passed straight through to Gradio.
        TODO(review): confirm the response format matches gr "image" output.
    """
    response = diffusion_model(
        inputs=prompt,
        params={"guidance_scale": 7.5, "num_inference_steps": 50},
    )
    return response
def process_input(tamil_text):
    """Full pipeline: translate Tamil input, then generate a poem and an image.

    Args:
        tamil_text: Raw Tamil text from the Gradio textbox.

    Returns:
        A 3-tuple ``(translated_text, creative_response, image)``. On failure
        the first element carries the error message and the image slot is
        None, so the UI renders an error instead of crashing.
    """
    try:
        translated_text = translate_text(tamil_text)
        creative_response = generate_creative_writing(translated_text)
        generated_image = generate_image(translated_text)
        return translated_text, creative_response, generated_image
    except Exception as e:  # UI-boundary handler: surface the error to the user
        return str(e), "Error occurred during processing", None
# --- Gradio UI ---
iface = gr.Interface(
    fn=process_input,
    inputs="text",
    outputs=["text", "text", "image"],
    title="Creative Writing and Image Generation",
    description="Enter Tamil text to get translated text, a creative response, and an image.",
)

# Launch only when executed as a script; Hugging Face Spaces runs the file as
# __main__, so behavior there is unchanged, while importing the module for
# tests no longer starts a server.
if __name__ == "__main__":
    iface.launch()