fantaxy committed on
Commit
e5ec0df
·
verified ·
1 Parent(s): c83463b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +80 -29
app.py CHANGED
@@ -2,11 +2,43 @@
2
 
3
  import gradio as gr
4
  from huggingface_hub import InferenceClient
 
5
  import os
6
  import requests
 
 
 
7
 
8
- # ์ถ”๋ก  API ํด๋ผ์ด์–ธํŠธ ์„ค์ •
 
 
 
9
  hf_client = InferenceClient("CohereForAI/c4ai-command-r-plus-08-2024", token=os.getenv("HF_TOKEN"))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
  def respond(
12
  message,
@@ -40,38 +72,57 @@ def respond(
40
  token = message.choices[0].delta.content
41
  if token is not None:
42
  response += token.strip("")
43
- yield response
 
 
 
 
 
44
  except Exception as e:
45
- yield f"Error: {str(e)}"
46
 
47
  # Gradio ์ธํ„ฐํŽ˜์ด์Šค ์„ค์ •
48
- interface = gr.ChatInterface(
49
- respond,
50
- additional_inputs=[
51
- gr.Textbox(label="System Message", value="Write(output) in ํ•œ๊ตญ์–ด."),
52
- gr.Slider(minimum=1, maximum=8000, value=7000, label="Max Tokens"),
53
- gr.Slider(minimum=0, maximum=1, value=0.7, label="Temperature"),
54
- gr.Slider(minimum=0, maximum=1, value=0.9, label="Top P"),
55
- ],
56
- examples=[
57
- ["ํŒํƒ€์ง€ ์†Œ์„ค์˜ ํฅ๋ฏธ๋กœ์šด ์†Œ์žฌ 10๊ฐ€์ง€๋ฅผ ์ œ์‹œํ•˜๋ผ"],
58
- ["๊ณ„์† ์ด์–ด์„œ ์ž‘์„ฑํ•˜๋ผ"],
59
- ["Translate into English"],
60
- ["๋งˆ๋ฒ• ์‹œ์Šคํ…œ์— ๋Œ€ํ•ด ๋” ์ž์„ธํžˆ ์„ค๋ช…ํ•˜๋ผ"],
61
- ["์ „ํˆฌ ์žฅ๋ฉด์„ ๋” ๊ทน์ ์œผ๋กœ ๋ฌ˜์‚ฌํ•˜๋ผ"],
62
- ["์ƒˆ๋กœ์šด ํŒํƒ€์ง€ ์ข…์กฑ์„ ์ถ”๊ฐ€ํ•˜๋ผ"],
63
- ["๊ณ ๋Œ€ ์˜ˆ์–ธ์— ๋Œ€ํ•ด ๋” ์ž์„ธํžˆ ์„ค๋ช…ํ•˜๋ผ"],
64
- ["์ฃผ์ธ๊ณต์˜ ๋‚ด๋ฉด ๋ฌ˜์‚ฌ๋ฅผ ์ถ”๊ฐ€ํ•˜๋ผ"],
65
- ],
66
- title="Fantasy Novel AI Generation",
67
- cache_examples=False,
68
- theme="Yntec/HaleyCH_Theme_Orange"
69
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
 
71
  # ์• ํ”Œ๋ฆฌ์ผ€์ด์…˜ ์‹คํ–‰
72
  if __name__ == "__main__":
73
  interface.launch(
74
- server_name="0.0.0.0", # ๋ชจ๋“  IP์—์„œ ์ ‘๊ทผ ๊ฐ€๋Šฅ
75
- server_port=7860, # ํฌํŠธ ์ง€์ •
76
- share=True # ๊ณต์œ  ๋งํฌ ์ƒ์„ฑ
77
- )
 
2
 
3
  import gradio as gr
4
  from huggingface_hub import InferenceClient
5
+ from gradio_client import Client
6
  import os
7
  import requests
8
+ import asyncio
9
+ import logging
10
+ from concurrent.futures import ThreadPoolExecutor
11
 
12
+ # ๋กœ๊น… ์„ค์ •
13
+ logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
14
+
15
+ # API ์„ค์ •
16
  hf_client = InferenceClient("CohereForAI/c4ai-command-r-plus-08-2024", token=os.getenv("HF_TOKEN"))
17
+ IMAGE_API_URL = "http://211.233.58.201:7896"
18
+
19
+ def generate_image(prompt: str) -> tuple:
20
+ """์ด๋ฏธ์ง€ ์ƒ์„ฑ ํ•จ์ˆ˜"""
21
+ try:
22
+ client = Client(IMAGE_API_URL)
23
+ # ํ”„๋กฌํ”„ํŠธ ์•ž์— "fantasy style," ์ถ”๊ฐ€
24
+ enhanced_prompt = f"fantasy style, {prompt}"
25
+ result = client.predict(
26
+ prompt=enhanced_prompt,
27
+ width=768,
28
+ height=768,
29
+ guidance=7.5,
30
+ inference_steps=30,
31
+ seed=3,
32
+ do_img2img=False,
33
+ init_image=None,
34
+ image2image_strength=0.8,
35
+ resize_img=True,
36
+ api_name="/generate_image"
37
+ )
38
+ return result[0], result[1]
39
+ except Exception as e:
40
+ logging.error(f"Image generation failed: {str(e)}")
41
+ return None, f"Error: {str(e)}"
42
 
43
  def respond(
44
  message,
 
72
  token = message.choices[0].delta.content
73
  if token is not None:
74
  response += token.strip("")
75
+ yield response, None # ์ด๋ฏธ์ง€๋ฅผ ์œ„ํ•œ None ์ถ”๊ฐ€
76
+
77
+ # ํ…์ŠคํŠธ ์ƒ์„ฑ์ด ์™„๋ฃŒ๋œ ํ›„ ์ด๋ฏธ์ง€ ์ƒ์„ฑ
78
+ image, seed = generate_image(response[:200]) # ์ฒ˜์Œ 200์ž๋ฅผ ์ด๋ฏธ์ง€ ํ”„๋กฌํ”„ํŠธ๋กœ ์‚ฌ์šฉ
79
+ yield response, image
80
+
81
  except Exception as e:
82
+ yield f"Error: {str(e)}", None
83
 
84
  # Gradio ์ธํ„ฐํŽ˜์ด์Šค ์„ค์ •
85
+ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange") as interface:
86
+ gr.Markdown("# Fantasy Novel AI Generation")
87
+
88
+ with gr.Row():
89
+ with gr.Column(scale=2):
90
+ chatbot = gr.Chatbot()
91
+ msg = gr.Textbox(label="Enter your message")
92
+ system_msg = gr.Textbox(label="System Message", value="Write(output) in ํ•œ๊ตญ์–ด.")
93
+
94
+ with gr.Row():
95
+ max_tokens = gr.Slider(minimum=1, maximum=8000, value=7000, label="Max Tokens")
96
+ temperature = gr.Slider(minimum=0, maximum=1, value=0.7, label="Temperature")
97
+ top_p = gr.Slider(minimum=0, maximum=1, value=0.9, label="Top P")
98
+
99
+ with gr.Column(scale=1):
100
+ image_output = gr.Image(label="Generated Image")
101
+
102
+ examples = gr.Examples(
103
+ examples=[
104
+ ["ํŒํƒ€์ง€ ์†Œ์„ค์˜ ํฅ๋ฏธ๋กœ์šด ์†Œ์žฌ 10๊ฐ€์ง€๋ฅผ ์ œ์‹œํ•˜๋ผ"],
105
+ ["๊ณ„์† ์ด์–ด์„œ ์ž‘์„ฑํ•˜๋ผ"],
106
+ ["Translate into English"],
107
+ ["๋งˆ๋ฒ• ์‹œ์Šคํ…œ์— ๋Œ€ํ•ด ๋” ์ž์„ธํžˆ ์„ค๋ช…ํ•˜๋ผ"],
108
+ ["์ „ํˆฌ ์žฅ๋ฉด์„ ๋” ๊ทน์ ์œผ๋กœ ๋ฌ˜์‚ฌํ•˜๋ผ"],
109
+ ["์ƒˆ๋กœ์šด ํŒํƒ€์ง€ ์ข…์กฑ์„ ์ถ”๊ฐ€ํ•˜๋ผ"],
110
+ ["๊ณ ๋Œ€ ์˜ˆ์–ธ์— ๋Œ€ํ•ด ๋” ์ž์„ธํžˆ ์„ค๋ช…ํ•˜๋ผ"],
111
+ ["์ฃผ์ธ๊ณต์˜ ๋‚ด๋ฉด ๋ฌ˜์‚ฌ๋ฅผ ์ถ”๊ฐ€ํ•˜๋ผ"],
112
+ ],
113
+ inputs=msg
114
+ )
115
+
116
+ msg.submit(
117
+ respond,
118
+ [msg, chatbot, system_msg, max_tokens, temperature, top_p],
119
+ [chatbot, image_output]
120
+ )
121
 
122
  # ์• ํ”Œ๋ฆฌ์ผ€์ด์…˜ ์‹คํ–‰
123
  if __name__ == "__main__":
124
  interface.launch(
125
+ server_name="0.0.0.0",
126
+ server_port=7860,
127
+ share=True
128
+ )