OmParkashPandeY committed on
Commit 1ee720d
1 Parent(s): abbf252

Upload 6 files

.gitignore ADDED
@@ -0,0 +1,2 @@
+ .venv/
+ .env
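The ignored .env file is where app.py looks for the Hugging Face token it loads via python-dotenv. A minimal sketch of its expected contents; the value shown is a placeholder, not a real token:

# .env (not committed) -- token read by app.py as HF_API_KEY
HF_API_KEY=hf_xxxxxxxxxxxxxxxxxxxxxxxx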
app.py ADDED
@@ -0,0 +1,83 @@
+ import os
+ import io
+ import json
+ 
+ import requests
+ import gradio as gr
+ from PIL import Image
+ from dotenv import load_dotenv, find_dotenv
+ 
+ _ = load_dotenv(find_dotenv())  # read local .env file
+ hf_api_key = os.environ['HF_API_KEY']
+ 
+ # Alternative text-to-image models (swap in by changing API_URL):
+ # API_URL = "https://api-inference.huggingface.co/models/sayakpaul/text-to-image-pokemons-gpt4"
+ # API_URL = "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5"
+ # API_URL = "https://api-inference.huggingface.co/models/cloudqi/cqi_text_to_image_pt_v0"
+ # API_URL = "https://api-inference.huggingface.co/models/playgroundai/playground-v2-1024px-aesthetic"
+ API_URL = "https://api-inference.huggingface.co/models/SimianLuo/LCM_Dreamshaper_v7"
+ 
+ # Helper function: POST to the text-to-image endpoint and return the raw image bytes
+ def get_completion(inputs, parameters=None, ENDPOINT_URL=API_URL):
+     headers = {
+         "Authorization": f"Bearer {hf_api_key}",
+         "Content-Type": "application/json"
+     }
+     data = {"inputs": inputs}
+     if parameters is not None:
+         data.update({"parameters": parameters})
+     response = requests.request("POST", ENDPOINT_URL, headers=headers, data=json.dumps(data))
+     return response.content
+ 
+ def generate(prompt, negative_prompt, steps, guidance, width, height):
+     params = {
+         "negative_prompt": negative_prompt,
+         "num_inference_steps": steps,
+         "guidance_scale": guidance,
+         "width": width,
+         "height": height
+     }
+     output = get_completion(prompt, params)
+     pil_image = Image.open(io.BytesIO(output))
+     return pil_image
+ 
+ def loadGUI():
+     with gr.Blocks() as demo:
+         gr.Markdown("# Image Generation with Stable Diffusion")
+         with gr.Row():
+             with gr.Column(scale=4):
+                 prompt = gr.Textbox(label="Your prompt")  # give the prompt some real estate
+             with gr.Column(scale=1, min_width=50):
+                 btn = gr.Button("Submit")  # submit button side by side
+         with gr.Accordion("Advanced options", open=False):  # hide the advanced options
+             negative_prompt = gr.Textbox(label="Negative prompt")
+             with gr.Row():
+                 with gr.Column():
+                     steps = gr.Slider(label="Inference Steps", minimum=1, maximum=100, step=1, value=25,
+                                       info="In how many steps will the denoiser denoise the image?")
+                     guidance = gr.Slider(label="Guidance Scale", minimum=1, maximum=20, step=1, value=7,
+                                          info="Controls how much the text prompt influences the result")
+                 with gr.Column():
+                     width = gr.Slider(label="Width", minimum=64, maximum=1024, step=32, value=512)
+                     height = gr.Slider(label="Height", minimum=64, maximum=1024, step=32, value=512)
+         output = gr.Image(label="Result")  # move the output up too
+ 
+         btn.click(fn=generate, inputs=[prompt, negative_prompt, steps, guidance, width, height], outputs=[output])
+ 
+     gr.close_all()
+     demo.launch(share=True)
+ 
+ def main():
+     loadGUI()
+ 
+ if __name__ == "__main__":
+     main()
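For a quick smoke test without launching the Gradio UI, the same helper can be called directly. A minimal sketch, assuming it runs from the project root with a valid HF_API_KEY in .env; the script name, prompt, and output filename are illustrative. Note that the Inference API may return a JSON error body instead of image bytes (for example while the model is loading), in which case Image.open will raise.

# smoke_test.py -- hypothetical script, not part of this commit
import io
from PIL import Image
from app import get_completion  # importing app.py does not launch the UI (guarded by __main__)

# Ask the configured endpoint for one image and save it to disk.
image_bytes = get_completion(
    "a police helicopter hovering over a city at dusk",
    parameters={"negative_prompt": "blurry, low quality",
                "num_inference_steps": 25, "guidance_scale": 7,
                "width": 512, "height": 512},
)
Image.open(io.BytesIO(image_bytes)).save("result.png")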
images/helicopter.jpg ADDED
images/maxresdefault.jpg ADDED
images/police-heli.jpg ADDED
requirements.txt ADDED
@@ -0,0 +1,75 @@
+ accelerate==0.25.0
+ aiofiles==23.2.1
+ altair==5.2.0
+ annotated-types==0.6.0
+ anyio==3.7.1
+ attrs==23.1.0
+ certifi==2023.11.17
+ charset-normalizer==3.3.2
+ click==8.1.7
+ colorama==0.4.6
+ contourpy==1.2.0
+ cycler==0.12.1
+ fastapi==0.104.1
+ ffmpy==0.3.1
+ filelock==3.13.1
+ fonttools==4.46.0
+ fsspec==2023.12.1
+ gradio==4.8.0
+ gradio_client==0.7.1
+ h11==0.14.0
+ httpcore==1.0.2
+ httpx==0.25.2
+ huggingface-hub==0.19.4
+ idna==3.6
+ importlib-resources==6.1.1
+ Jinja2==3.1.2
+ jsonschema==4.20.0
+ jsonschema-specifications==2023.11.2
+ kiwisolver==1.4.5
+ markdown-it-py==3.0.0
+ MarkupSafe==2.1.3
+ matplotlib==3.8.2
+ mdurl==0.1.2
+ mpmath==1.3.0
+ networkx==3.2.1
+ numpy==1.26.2
+ orjson==3.9.10
+ packaging==23.2
+ pandas==2.1.4
+ Pillow==10.1.0
+ psutil==5.9.6
+ pydantic==2.5.2
+ pydantic_core==2.14.5
+ pydub==0.25.1
+ Pygments==2.17.2
+ pyparsing==3.1.1
+ python-dateutil==2.8.2
+ python-dotenv==1.0.0
+ python-multipart==0.0.6
+ pytz==2023.3.post1
+ PyYAML==6.0.1
+ referencing==0.32.0
+ regex==2023.10.3
+ requests==2.31.0
+ rich==13.7.0
+ rpds-py==0.13.2
+ safetensors==0.4.1
+ semantic-version==2.10.0
+ shellingham==1.5.4
+ six==1.16.0
+ sniffio==1.3.0
+ starlette==0.27.0
+ sympy==1.12
+ tokenizers==0.15.0
+ tomlkit==0.12.0
+ toolz==0.12.0
+ torch==2.1.1
+ tqdm==4.66.1
+ transformers==4.35.2
+ typer==0.9.0
+ typing_extensions==4.8.0
+ tzdata==2023.3
+ urllib3==2.1.0
+ uvicorn==0.24.0.post1
+ websockets==11.0.3
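As a quick sanity check that an installed environment matches these pins, a small sketch; the script name is hypothetical and the packages checked are an arbitrary subset of the list above:

# check_pins.py -- hypothetical version check
from importlib.metadata import version

expected = {
    "gradio": "4.8.0",
    "requests": "2.31.0",
    "python-dotenv": "1.0.0",
    "Pillow": "10.1.0",
}
for name, pin in expected.items():
    installed = version(name)
    status = "OK" if installed == pin else f"mismatch (expected {pin})"
    print(f"{name}=={installed}  {status}")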