gokaygokay committed on
Commit
6f82089
1 Parent(s): 2a82c46

image generation delete

Files changed (2)
  1. huggingface_inference_node.py +0 -18
  2. ui_components.py +0 -25
huggingface_inference_node.py CHANGED
@@ -5,7 +5,6 @@ from datetime import datetime
 import anthropic
 from groq import Groq
 from openai import OpenAI
-from gradio_client import Client
 
 huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
 groq_api_key = os.getenv("GROQ_API_KEY")
@@ -23,8 +22,6 @@ class LLMInferenceNode:
             api_key=sambanova_api_key,
             base_url="https://api.sambanova.ai/v1",
         )
-        self.huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
-        self.flux_client = Client("KingNish/Realtime-FLUX", hf_token=self.huggingface_token)
 
     def generate(
         self,
@@ -180,18 +177,3 @@ You are allowed to make up film and branding names, and do them like 80's, 90's
         except Exception as e:
             print(f"An error occurred: {e}")
             return f"Error occurred while processing the request: {str(e)}"
-
-    def generate_image(self, prompt, seed=42, width=1024, height=1024):
-        try:
-            result = self.flux_client.predict(
-                prompt=prompt,
-                seed=seed,
-                width=width,
-                height=height,
-                api_name="/generate_image"
-            )
-            # Extract the image path from the result tuple
-            image_path = result[0]
-            return image_path
-        except Exception as e:
-            raise Exception(f"Error generating image: {str(e)}")
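For reference, the deleted method wrapped the KingNish/Realtime-FLUX Space through gradio_client. A minimal standalone sketch of the same call, assuming the Space still exposes the /generate_image endpoint with these parameters and that HUGGINGFACE_TOKEN is set in the environment, looks like this:

# Standalone sketch of the removed image-generation call (not part of this commit).
# Assumes the KingNish/Realtime-FLUX Space still serves the /generate_image endpoint.
import os
from gradio_client import Client

flux_client = Client("KingNish/Realtime-FLUX", hf_token=os.getenv("HUGGINGFACE_TOKEN"))
result = flux_client.predict(
    prompt="a cinematic 80's style film poster of a neon-lit city",  # example prompt
    seed=42,
    width=1024,
    height=1024,
    api_name="/generate_image",
)
image_path = result[0]  # the endpoint returns a tuple; the first element is the image filepath
print(image_path)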
 
ui_components.py CHANGED
@@ -141,14 +141,6 @@ def create_interface():
                 generate_text_button = gr.Button("Generate Prompt with LLM")
                 text_output = gr.Textbox(label="Generated Text", lines=10, show_copy_button=True)
 
-            with gr.Column(scale=2):
-                with gr.Accordion("Image Generation", open=True):
-                    image_output = gr.Image(label="Generated Image", type="filepath")
-                    generate_image_button = gr.Button("Generate Image")
-                    image_seed = gr.Number(label="Image Seed", value=42, step=1)
-                    image_width = gr.Slider(label="Width", minimum=512, maximum=2048, value=1024, step=64)
-                    image_height = gr.Slider(label="Height", minimum=512, maximum=2048, value=1024, step=64)
-
         def create_caption(image, model):
             if image is not None:
                 if model == "Florence-2":
@@ -272,21 +264,4 @@ def create_interface():
             ]
         )
 
-        # Function to generate image
-        def generate_image(text, seed, width, height):
-            try:
-                image_path = llm_node.generate_image(text, seed=seed, width=width, height=height)
-                print(f"Image generated: {image_path}")
-                return image_path
-            except Exception as e:
-                print(f"An error occurred while generating the image: {e}")
-                return None
-
-        # Connect the image generation button
-        generate_image_button.click(
-            generate_image,
-            inputs=[text_output, image_seed, image_width, image_height],
-            outputs=[image_output]
-        )
-
     return demo
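After this change, create_interface builds the UI without the image column, and nothing in ui_components.py calls llm_node.generate_image anymore. A quick smoke test of the trimmed interface, assuming ui_components.py and its dependencies import cleanly in the Space environment, could be:

# Smoke test: build and launch the Gradio app after the image-generation UI was removed.
# Assumes ui_components.py and its imports (gradio, the LLM node, etc.) are available locally.
from ui_components import create_interface

demo = create_interface()
demo.launch()  # should start without the "Image Generation" accordion or its button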
 