Bashir Gulistani committed on
Commit 3e52413 · unverified · 1 Parent(s): a857577

Update app.py

Files changed (1)
  1. app.py +4 -181
app.py CHANGED
@@ -1,15 +1,8 @@
  import gradio as gr
  from huggingface_hub import InferenceClient
- from PIL import Image, ImageEnhance
- import torch
- import os
- import numpy as np
- from torch.autograd import Variable
- from torchvision import transforms
- import torch.nn.functional as F

  # --- Model 1: AI Chatbot Setup ---
- client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3") # HuggingFaceH4/zephyr-7b-beta
+ client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")

  # Personalities for AI Chatbot
  PERSONALITIES = {
@@ -82,117 +75,7 @@ def generate_inspiration(history):
  def clear_conversation():
      return [], ""

-
-
-
- #######
-
- os.system("git clone https://github.com/xuebinqin/DIS")
- os.system("mv products/DIS/IS-Net/* .")
-
- from data_loader_cache import normalize, im_reader, im_preprocess
- from models import *
-
- device = 'cuda' if torch.cuda.is_available() else 'cpu'
-
- if not os.path.exists("saved_models"):
-     os.mkdir("saved_models")
-     os.system("mv products/isnet.pth saved_models/")
-
- class GOSNormalize(object):
-     def __init__(self, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
-         self.mean = mean
-         self.std = std
-
-     def __call__(self, image):
-         image = normalize(image, self.mean, self.std)
-         return image
-
- transform = transforms.Compose([GOSNormalize([0.5, 0.5, 0.5], [1.0, 1.0, 1.0])])
-
- def load_image(im_path, hypar):
-     im = im_reader(im_path)
-     im, im_shp = im_preprocess(im, hypar["cache_size"])
-     im = torch.divide(im, 255.0)
-     shape = torch.from_numpy(np.array(im_shp))
-     return transform(im).unsqueeze(0), shape.unsqueeze(0)
-
- def build_model(hypar, device):
-     net = hypar["model"]
-     if hypar["model_digit"] == "half":
-         net.half()
-         for layer in net.modules():
-             if isinstance(layer, nn.BatchNorm2d):
-                 layer.float()
-
-     net.to(device)
-     if hypar["restore_model"] != "":
-         net.load_state_dict(torch.load(hypar["model_path"] + "/" + hypar["restore_model"], map_location=device))
-     net.eval()
-     return net
-
- def predict(net, inputs_val, shapes_val, hypar, device):
-     net.eval()
-     if hypar["model_digit"] == "full":
-         inputs_val = inputs_val.type(torch.FloatTensor)
-     else:
-         inputs_val = inputs_val.type(torch.HalfTensor)
-
-     inputs_val_v = Variable(inputs_val, requires_grad=False).to(device)
-     ds_val = net(inputs_val_v)[0]
-     pred_val = ds_val[0][0, :, :, :]
-     pred_val = torch.squeeze(F.upsample(torch.unsqueeze(pred_val, 0), (shapes_val[0][0], shapes_val[0][1]), mode='bilinear'))
-
-     ma = torch.max(pred_val)
-     mi = torch.min(pred_val)
-     pred_val = (pred_val - mi) / (ma - mi)
-
-     if device == 'cuda': torch.cuda.empty_cache()
-     return (pred_val.detach().cpu().numpy() * 255).astype(np.uint8)
-
- hypar = {}
- hypar["model_path"] = "./saved_models"
- hypar["restore_model"] = "isnet.pth"
- hypar["interm_sup"] = False
- hypar["model_digit"] = "full"
- hypar["seed"] = 0
- hypar["cache_size"] = [1024, 1024]
- hypar["input_size"] = [1024, 1024]
- hypar["crop_size"] = [1024, 1024]
- hypar["model"] = ISNetDIS()
-
- net = build_model(hypar, device)
-
- def inference(image):
-     image_path = image
-     image_tensor, orig_size = load_image(image_path, hypar)
-     mask = predict(net, image_tensor, orig_size, hypar, device)
-     pil_mask = Image.fromarray(mask).convert('L')
-     im_rgb = Image.open(image).convert("RGB")
-     im_rgba = im_rgb.copy()
-     im_rgba.putalpha(pil_mask)
-     return [im_rgba, pil_mask]
-
- # Functions Added From Team
- def rotate_image(image, degrees):
-     img = Image.open(image).rotate(degrees)
-     return img
-
- def resize_image(image, width, height):
-     img = Image.open(image).resize((width, height))
-     return img
-
- def convert_to_grayscale(image):
-     img = Image.open(image).convert('L')
-     return img
-
- def adjust_brightness(image, brightness_factor):
-     img = Image.open(image)
-     enhancer = ImageEnhance.Brightness(img)
-     img_enhanced = enhancer.enhance(brightness_factor)
-     return img_enhanced
-
- # Custom CSS Added From Team
+ # Custom CSS
  custom_css = """
  body {
      background-color: #f0f0f0;
@@ -205,62 +88,9 @@ body {
      border-radius: 12px;
      box-shadow: 0px 4px 16px rgba(0, 0, 0, 0.2);
  }
- button.lg {
-     background-color: #4CAF50;
-     color: white;
-     border: none;
-     padding: 10px 20px;
-     text-align: center;
-     text-decoration: none;
-     display: inline-block;
-     font-size: 16px;
-     margin: 4px 2px;
-     transition-duration: 0.4s;
-     cursor: pointer;
-     border-radius: 8px;
- }
- button.lg:hover {
-     background-color: #45a049;
-     color: white;
- }
  """

- # Used Some Codes From Yang's Chatbot
- with gr.Blocks(css=custom_css) as background_remover_interface:
-     gr.Markdown("<h1 style='text-align: center;'>🚩 Image Processor with Brightness Adjustment 🚩</h1>")
-     with gr.Row():
-         with gr.Column():
-             input_image = gr.Image(label="Input Image", type='filepath')
-             rotate_button = gr.Button("Rotate Image")
-             resize_button = gr.Button("Resize Image")
-             grayscale_button = gr.Button("Convert to Grayscale")
-             brightness_slider = gr.Slider(minimum=0.5, maximum=2.0, step=0.1, value=1.0, label="Adjust Brightness")
-             submit_button = gr.Button("Submit", variant="primary")
-             clear_button = gr.Button("Clear", variant="secondary")
-         with gr.Column():
-             output_image = gr.Image(label="Output Image")
-             mask_image = gr.Image(label="Mask")
-
-     # AI Generated: Use Gradio Blocks to organize the interface with buttons
-     rotate_button.click(rotate_image, inputs=[input_image, gr.Slider(minimum=0, maximum=360, step=1, value=90, label="Rotation Degrees")], outputs=output_image)
-     resize_button.click(resize_image, inputs=[input_image, gr.Number(value=512, label="Width"), gr.Number(value=512, label="Height")], outputs=output_image)
-     grayscale_button.click(convert_to_grayscale, inputs=input_image, outputs=output_image)
-     # input_image
-     brightness_slider.change(adjust_brightness, inputs=[input_image, brightness_slider], outputs=output_image)
-
-     submit_button.click(inference, inputs=input_image, outputs=[output_image, mask_image])
-
-     clear_button.click(lambda: (None, None, None), inputs=None, outputs=[input_image, output_image, mask_image])
-
-
-
-
-
- #####
-
-
- # --- Gradio Interfaces ---
- # AI Chatbot Interface
+ # --- Gradio Interface ---
  with gr.Blocks(css=custom_css) as chatbot_interface:
      gr.Markdown("### AI Chatbot - Choose a personality and start chatting")
      personality = gr.Radio(choices=["Friendly", "Professional", "Humorous", "Empathetic"], value="Friendly", label="Personality")
@@ -278,12 +108,5 @@ with gr.Blocks(css=custom_css) as chatbot_interface:
      challenge_btn.click(generate_daily_challenge, inputs=history, outputs=chatbot)
      inspire_me_btn.click(generate_inspiration, inputs=history, outputs=chatbot)

-
-
-
- # Combine the two interfaces into a single app with tabs
- app = gr.TabbedInterface([chatbot_interface, background_remover_interface], ["AI Chatbot", "Background Remover"])
-
  # Launch the app
- #app.launch(share=True)
- app.launch(server_name="0.0.0.0", server_port=8040, share=True, enable_queue=True)
+ chatbot_interface.launch(share=True, enable_queue=True)
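
Note on the new launch call: enable_queue is accepted by Gradio 3.x launch() (with a deprecation warning), but it appears to have been removed in Gradio 4.x, where queueing is configured on the Blocks object instead. A minimal sketch of an equivalent call for newer Gradio releases, assuming the same chatbot_interface defined above:

# Sketch (assumption: running on Gradio 4.x, where enable_queue is no longer a launch() argument)
chatbot_interface.queue()
chatbot_interface.launch(share=True)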
 