saranbalan committed
Commit 2f88375
1 Parent(s): 4b526a8
Update app.py
app.py
CHANGED
@@ -32,9 +32,19 @@ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
 model_id1 = "dreamlike-art/dreamlike-diffusion-1.0"
 pipe = StableDiffusionPipeline.from_pretrained(model_id1, torch_dtype=torch.float16, use_safetensors=True)
-pipe = pipe.to("
+pipe = pipe.to("device")
 
-
+# Updated function for text generation using the new API structure
+def generate_creative_text(prompt):
+    try:
+        response = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",  # Change this to the model you prefer, e.g., "gpt-4" if available
+            messages=[
+                {"role": "system", "content": "You are a creative assistant."},
+                {"role": "user", "content": prompt}
+            ],
+            max_tokens=1024,
+            temperature=0.7,
         )
         return response['choices'][0]['message']['content'].strip()
     except Exception as e:
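Note: the added generate_creative_text helper calls openai.ChatCompletion.create, which only exists in the pre-1.0 openai Python SDK; on openai>=1.0 that entry point was removed and the call fails with a migration error. Below is a minimal sketch of the same request against the 1.x client, assuming the Space does not pin openai<1.0; the client setup and the except body are placeholders, since the commit's own error handling is not visible in this hunk.

# Sketch only, not part of the commit: the same chat request written against the
# openai>=1.0 SDK. Model, messages, and sampling parameters mirror the diff above.
from openai import OpenAI

client = OpenAI()  # expects OPENAI_API_KEY in the environment

def generate_creative_text(prompt):
    try:
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "You are a creative assistant."},
                {"role": "user", "content": prompt},
            ],
            max_tokens=1024,
            temperature=0.7,
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        return f"Text generation failed: {e}"  # placeholder error handling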
@@ -76,7 +86,7 @@ def process_audio(audio_path, image_option, creative_text_option):
     try:
         model_id1 = "dreamlike-art/dreamlike-diffusion-1.0"
         pipe = StableDiffusionPipeline.from_pretrained(model_id1, torch_dtype=torch.float16, use_safetensors=True)
-        pipe = pipe.to("
+        pipe = pipe.to("device")
         image = pipe(translation).images[0]
     except Exception as e:
         return tamil_text, translation, creative_text, f"An error occurred during image generation: {str(e)}"
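Note: both hunks change the device move to pipe.to("device"), which passes the literal string "device" rather than the device variable defined at the top of app.py (device = torch.device('cuda' if torch.cuda.is_available() else 'cpu'), visible in the first hunk header). torch does not accept "device" as a device string, so this line would raise and image generation would fall into the except branch. A minimal sketch of the presumed intent, assuming float16 weights should only be requested when CUDA is available:

# Sketch of the presumed intent, not what the commit does: pass the torch.device
# object instead of the literal string, and fall back to float32 on CPU, where
# half-precision pipelines generally do not run.
import torch
from diffusers import StableDiffusionPipeline

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model_id1 = "dreamlike-art/dreamlike-diffusion-1.0"
pipe = StableDiffusionPipeline.from_pretrained(
    model_id1,
    torch_dtype=torch.float16 if device.type == 'cuda' else torch.float32,
    use_safetensors=True,
)
pipe = pipe.to(device)  # the variable, not the string "device"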