Upload folder using huggingface_hub
Files changed:
- .env              +1   -0
- .gitignore        +0   -0
- README.md         +2   -8
- index.py          +112 -0
- requirements.txt  +4   -0
.env
ADDED
@@ -0,0 +1 @@
+REPLICATE_API_TOKEN=r8_DAzyOBdCwUdt0b26ZMPWLyvyHTh55uh2Lwb3c
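For context, index.py in this upload loads this file with python-dotenv and then relies on the environment variable for authentication: the replicate client reads REPLICATE_API_TOKEN from the environment when replicate.run() is called. A minimal sketch of that flow (the assert is illustrative, not part of the commit):

    import os
    from dotenv import load_dotenv, find_dotenv

    # Copy REPLICATE_API_TOKEN from .env into the process environment;
    # the replicate client authenticates from this variable by default.
    load_dotenv(find_dotenv())
    assert os.getenv("REPLICATE_API_TOKEN"), "REPLICATE_API_TOKEN not found"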
.gitignore
ADDED
File without changes
README.md
CHANGED
@@ -1,12 +1,6 @@
 ---
-title:
-
-colorFrom: green
-colorTo: pink
+title: cmf-fine-tuned
+app_file: index.py
 sdk: gradio
 sdk_version: 4.36.1
-app_file: app.py
-pinned: false
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
index.py
ADDED
@@ -0,0 +1,112 @@
+import os
+import base64
+import numpy as np
+from PIL import Image
+import io
+import requests
+import gradio as gr
+
+import replicate
+
+from dotenv import load_dotenv, find_dotenv
+
+# Locate the .env file
+dotenv_path = find_dotenv()
+
+load_dotenv(dotenv_path)
+
+REPLICATE_API_TOKEN = os.getenv('REPLICATE_API_TOKEN')
+
+
+def image_classifier(prompt, starter_image, image_strength):
+
+    if starter_image is not None:
+        starter_image_pil = Image.fromarray(starter_image.astype('uint8'))
+
+        # Resize the starter image if either dimension is larger than 512 pixels
+        if starter_image_pil.size[0] > 512 or starter_image_pil.size[1] > 512:
+            # Calculate the new size while maintaining the aspect ratio
+            if starter_image_pil.size[0] > starter_image_pil.size[1]:
+                # Width is larger than height
+                new_width = 512
+                new_height = int((512 / starter_image_pil.size[0]) * starter_image_pil.size[1])
+            else:
+                # Height is larger than width
+                new_height = 512
+                new_width = int((512 / starter_image_pil.size[1]) * starter_image_pil.size[0])
+
+            # Resize the image
+            starter_image_pil = starter_image_pil.resize((new_width, new_height), Image.LANCZOS)
+
+        # Save the starter image to a bytes buffer
+        buffered = io.BytesIO()
+        starter_image_pil.save(buffered, format="JPEG")
+
+        # Encode the starter image to base64
+        starter_image_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8')
+
+    if starter_image is not None:
+        input = {
+            "width": 512,
+            "height": 512,
+            "prompt": prompt + " in the style of TOK",
+            #"refine": "expert_ensemble_refiner",
+            "apply_watermark": False,
+            "num_inference_steps": 25,
+            "num_outputs": 3,
+            "lora_scale": .96,
+            "image": "data:image/jpeg;base64," + starter_image_base64,
+            "prompt_strength": 1 - image_strength,
+        }
+    else:
+        input = {
+            "width": 512,
+            "height": 512,
+            "prompt": prompt + " in the style of TOK",
+            #"refine": "expert_ensemble_refiner",
+            "apply_watermark": False,
+            "num_inference_steps": 25,
+            "num_outputs": 3,
+            "lora_scale": .96,
+        }
+
+    output = replicate.run(
+        # update to new trained model
+        "ltejedor/cmf:3af83ef60d86efbf374edb788fa4183a6067416e2fadafe709350dc1efe37d1d",
+        input=input
+    )
+
+    print(output)
+
+    # Download the first image from its URL
+    image_url = output[0]
+    print(image_url)
+    response = requests.get(image_url)
+    print(response)
+    img1 = Image.open(io.BytesIO(response.content))
+
+    # Download the second image from its URL
+    image_url = output[1]
+    print(image_url)
+    response = requests.get(image_url)
+    print(response)
+    img2 = Image.open(io.BytesIO(response.content))
+
+    # Download the third image from its URL
+    image_url = output[2]
+    print(image_url)
+    response = requests.get(image_url)
+    print(response)
+    img3 = Image.open(io.BytesIO(response.content))
+
+    return [img1, img2, img3]
+
+
+# app = Flask(__name__)
+# os.environ.get("REPLICATE_API_TOKEN")
+
+# @app.route("/")
+# def index():
+
+demo = gr.Interface(fn=image_classifier, inputs=["text", "image", gr.Slider(0, 1, step=0.025, value=0.2, label="Image Strength")], outputs=["image", "image", "image"])
+demo.launch(share=False)
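The three download blocks above differ only in the output index; an equivalent loop-based version (a sketch, not what this commit contains, assuming output holds the three URLs requested via num_outputs=3) would be:

    # Fetch each generated image URL returned by replicate.run() and decode
    # it into a PIL image, mirroring the three repeated blocks above.
    images = []
    for image_url in output[:3]:
        response = requests.get(image_url)
        response.raise_for_status()
        images.append(Image.open(io.BytesIO(response.content)))
    return images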
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+Flask==3.0.0
+replicate==0.26.0
+gradio
+openai
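Usage note: with these packages installed and REPLICATE_API_TOKEN available (from .env or the environment), running python index.py launches the Gradio interface locally via demo.launch(share=False). The generation function can also be called directly; a hedged sketch, with an illustrative prompt:

    # Text-only branch (no starter image); returns three PIL images.
    imgs = image_classifier("a concept product render", None, 0.2)
    for i, img in enumerate(imgs):
        img.save(f"cmf_output_{i}.png")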