saranbalan committed
Commit 7318986
1 Parent(s): 64121ed

Update app.py

Files changed (1)
  1. app.py +16 -10
app.py CHANGED
@@ -5,26 +5,32 @@ from groq import Groq
 from deep_translator import GoogleTranslator
 from diffusers import StableDiffusionPipeline
 import torch
+import huggingface_hub
 
-# Set up Groq API key
-api_key = os.getenv("GROQ_API_KEY")
+# Replace with your actual Groq API key
+api_key = "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"
 client = Groq(api_key=api_key)
 
+# Set Hugging Face API key
+HF_API_KEY = "https://huggingface.co/ByteDance/SDXL-Lightning/resolve/main/sdxl_lightning_1step_x0.safetensors"
+huggingface_hub.login(HF_API_KEY)
+
 # Set device: CUDA if available, else CPU
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
 # Load Whisper model (if using locally, else use API as in original code)
-# This is assuming you're using Whisper locally, if not, the client API is used.
 whisper_model = whisper.load_model("base")
 
 # Model IDs for Stable Diffusion pipelines
-HF_API_KEY = "https://huggingface.co/ByteDance/SDXL-Lightning/resolve/main/sdxl_lightning_1step_x0.safetensors"
+model_id1 = "dreamlike-art/dreamlike-diffusion-1.0"
+model_id2 = "stabilityai/stable-diffusion-xl-base-1.0"
+restricted_model_id = "ByteDance/SDXL-Lightning"  # Model you want to access using HF_API_KEY
 
 # Initialize Stable Diffusion pipeline based on device
 if torch.cuda.is_available():
-    pipe = StableDiffusionPipeline.from_pretrained(HF_API_KEY, torch_dtype=torch.float16)
+    pipe = StableDiffusionPipeline.from_pretrained(model_id2, torch_dtype=torch.float16)
 else:
-    pipe = StableDiffusionPipeline.from_pretrained(HF_API_KEY)  # Omit torch_dtype for CPU
+    pipe = StableDiffusionPipeline.from_pretrained(model_id2)  # Omit torch_dtype for CPU
 
 # Move model to the selected device (either GPU or CPU)
 pipe = pipe.to(device)
@@ -58,12 +64,12 @@ def process_audio(audio_path, image_option):
     image = None
     if image_option == "Generate Image":
        try:
-            model_id1 = "dreamlike-art/dreamlike-diffusion-1.0"
-            pipe = StableDiffusionPipeline.from_pretrained(model_id1, torch_dtype=torch.float16, use_safetensors=True)
-            pipe = pipe.to("cuda")
+            # Use the Hugging Face API key to load the restricted model for image generation
+            pipe = StableDiffusionPipeline.from_pretrained(restricted_model_id, torch_dtype=torch.float16, use_auth_token=HF_API_KEY)
+            pipe = pipe.to(device)
             image = pipe(translation).images[0]
         except Exception as e:
-            return tamil_text, translation, f"An error occurred during image generation: {str(e)}"
+            return tamil_text, translation, f"An error occurred during image generation: {str(e)}", None
 
     return tamil_text, translation, image
 
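
For reference, the authentication calls introduced above follow the usual huggingface_hub pattern, which expects a personal access token (a string beginning with "hf_") rather than a .safetensors file URL. A minimal sketch of that pattern, assuming a token stored in an HF_TOKEN environment variable and a placeholder gated repo id (both illustrative, not taken from app.py):

import os
import torch
import huggingface_hub
from diffusers import StableDiffusionPipeline

# Hypothetical token and repo id, used only to illustrate the call shapes.
hf_token = os.getenv("HF_TOKEN")        # an access token string such as "hf_...", not a file URL
huggingface_hub.login(token=hf_token)   # caches the token for subsequent Hub requests

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = StableDiffusionPipeline.from_pretrained(
    "some-org/some-gated-model",        # placeholder repo id for a gated/private model
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    token=hf_token,                     # newer diffusers spelling of the older use_auth_token argument
)
pipe = pipe.to(device)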