openfree committed
Commit
705c5b5
•
1 Parent(s): f2c0975

Update app.py

Files changed (1)
  1. app.py +84 -191
app.py CHANGED
@@ -1,222 +1,115 @@
 
 
  import gradio as gr
  from huggingface_hub import InferenceClient
  import os
  from typing import List, Tuple

- # Set up the Hugging Face token
- HF_TOKEN = os.getenv("HF_TOKEN")
-
- # Available LLM models
- LLM_MODELS = {
-     "Mistral": "mistralai/Mistral-7B-Instruct-v0.2",
-     "Zephyr": "HuggingFaceH4/zephyr-7b-beta",
-     "OpenChat": "openchat/openchat-3.5",
-     "Llama2": "meta-llama/Llama-2-7b-chat-hf",
-     "Phi": "microsoft/phi-2",
-     "Neural": "nvidia/neural-chat-7b-v3-1",
-     "Starling": "HuggingFaceH4/starling-lm-7b-alpha"
- }
-
- # Default selected models
- DEFAULT_MODELS = [
-     "mistralai/Mistral-7B-Instruct-v0.2",
-     "HuggingFaceH4/zephyr-7b-beta",
-     "openchat/openchat-3.5"
- ]
-
- # Initialize clients with token
- clients = {
-     model: InferenceClient(model, token=HF_TOKEN)
-     for model in LLM_MODELS.values()
- }
-
- def process_file(file) -> str:
      if file is None:
          return ""
-     if file.name.endswith(('.txt', '.md')):
-         return file.read().decode('utf-8')
-     return f"Uploaded file: {file.name}"
- def respond_single(
-     client,
-     message: str,
      history: List[Tuple[str, str]],
-     system_message: str,
-     max_tokens: int,
-     temperature: float,
-     top_p: float,
  ):
-     system_prefix = """You must answer in Korean. Your role is to provide detailed explanations and Q&A based on the given content.
- Explain very kindly and in detail."""
-     messages = [{"role": "system", "content": f"{system_prefix} {system_message}"}]
-
-     for user, assistant in history:
-         if user:
-             messages.append({"role": "user", "content": user})
-         if assistant:
-             messages.append({"role": "assistant", "content": assistant})

      messages.append({"role": "user", "content": message})
-
  response = ""
63
  try:
64
- for msg in client.chat_completion(
65
  messages,
66
  max_tokens=max_tokens,
67
  stream=True,
68
  temperature=temperature,
69
  top_p=top_p,
70
  ):
71
- if hasattr(msg.choices[0].delta, 'content'):
72
- token = msg.choices[0].delta.content
73
- if token is not None:
74
- response += token
75
- yield response
76
  except Exception as e:
77
- yield f"Error: {str(e)}"
78
-
79
- def respond_all(
-     message: str,
-     file,
-     history1: List[Tuple[str, str]],
-     history2: List[Tuple[str, str]],
-     history3: List[Tuple[str, str]],
-     selected_models: List[str],
-     system_message: str,
-     max_tokens: int,
-     temperature: float,
-     top_p: float,
- ):
-     if file:
-         file_content = process_file(file)
-         message = f"{message}\n\nFile content:\n{file_content}"
-
-     while len(selected_models) < 3:
-         selected_models.append(selected_models[-1])
-
-     def generate(client, history):
-         return respond_single(
-             client,
-             message,
-             history,
-             system_message,
-             max_tokens,
-             temperature,
-             top_p,
-         )
-
-     return (
-         generate(clients[selected_models[0]], history1),
-         generate(clients[selected_models[1]], history2),
-         generate(clients[selected_models[2]], history3),
-     )
-
- css = """
116
- footer {visibility: hidden}
117
- """
118
-
119
- with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
120
- with gr.Row():
121
- model_choices = gr.Checkboxgroup(
122
- choices=list(LLM_MODELS.values()),
123
- value=DEFAULT_MODELS,
124
- label="Select Models (Choose up to 3)",
125
- interactive=True
126
- )
127
-
128
- with gr.Row():
129
- with gr.Column():
130
- chat1 = gr.ChatInterface(
131
- lambda message, history: None,
132
- chatbot=gr.Chatbot(height=400, label="Chat 1"),
133
- textbox=False,
134
- )
135
- with gr.Column():
136
- chat2 = gr.ChatInterface(
137
- lambda message, history: None,
138
- chatbot=gr.Chatbot(height=400, label="Chat 2"),
139
- textbox=False,
140
- )
141
- with gr.Column():
142
- chat3 = gr.ChatInterface(
143
- lambda message, history: None,
144
- chatbot=gr.Chatbot(height=400, label="Chat 3"),
145
- textbox=False,
146
- )
147
-
148
- with gr.Row():
149
- with gr.Column():
150
- system_message = gr.Textbox(
151
- value="๋‹น์‹ ์€ ์นœ์ ˆํ•œ AI ์–ด์‹œ์Šคํ„ดํŠธ์ž…๋‹ˆ๋‹ค.",
152
- label="System message"
153
- )
154
-             max_tokens = gr.Slider(
-                 minimum=1,
-                 maximum=8000,
-                 value=4000,
-                 step=1,
-                 label="Max new tokens"
-             )
-             temperature = gr.Slider(
-                 minimum=0,
-                 maximum=1,
-                 value=0.7,
-                 step=0.1,
-                 label="Temperature"
-             )
-             top_p = gr.Slider(
-                 minimum=0,
-                 maximum=1,
-                 value=0.9,
-                 step=0.05,
-                 label="Top-p"
-             )
-
-     with gr.Row():
-         file_input = gr.File(label="Upload File (optional)")
-         msg_input = gr.Textbox(
-             show_label=False,
-             placeholder="Enter text and press enter",
-             container=False
-         )
-     examples = [
          ["Describe the usage in detail, over 4000 tokens, as if you were explaining it while looking at the screen"],
          ["Write 20 FAQ entries in detail. Use more than 4000 tokens."],
          ["Write a YouTube video script of more than 4000 tokens focusing on usage, differentiators, features, and strengths"],
          ["Write this service up as an SEO-optimized blog post of more than 4000 tokens"],
          ["Continue the previous answer"],
-     ]
-
-     gr.Examples(
-         examples=examples,
-         inputs=msg_input,
-         cache_examples=False
-     )
-
-     def submit_message(message, file):
-         return respond_all(
-             message,
-             file,
-             chat1.chatbot.value,
-             chat2.chatbot.value,
-             chat3.chatbot.value,
-             model_choices.value,
-             system_message.value,
-             max_tokens.value,
-             temperature.value,
-             top_p.value,
-         )
-
-     msg_input.submit(
-         submit_message,
-         [msg_input, file_input],
-         [chat1.chatbot, chat2.chatbot, chat3.chatbot],
-         api_name="submit"
-     )

  if __name__ == "__main__":
-     if not HF_TOKEN:
-         print("Warning: HF_TOKEN environment variable is not set")
-     demo.launch()
+
+
  import gradio as gr
  from huggingface_hub import InferenceClient
  import os
+ import pandas as pd
  from typing import List, Tuple

+ # Set up the Inference API client
+ hf_client = InferenceClient("CohereForAI/c4ai-command-r-plus-08-2024", token=os.getenv("HF_TOKEN"))
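A note on the new single-client setup: `InferenceClient` accepts `token=None` without complaint, so a missing `HF_TOKEN` only surfaces later as an authorization error on the first API call. A minimal fail-fast sketch (illustrative only, not part of this commit):

```python
# Illustrative sketch: fail fast when HF_TOKEN is missing instead of
# getting an authorization error on the first chat_completion call.
import os

from huggingface_hub import InferenceClient

token = os.getenv("HF_TOKEN")
if not token:
    raise RuntimeError("HF_TOKEN is not set; Inference API calls would fail.")

hf_client = InferenceClient("CohereForAI/c4ai-command-r-plus-08-2024", token=token)
```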
+ def read_uploaded_file(file):
      if file is None:
          return ""
+     try:
+         if file.name.endswith('.parquet'):
+             df = pd.read_parquet(file.name, engine='pyarrow')
+             return df.head(10).to_markdown(index=False)
+         else:
+             content = file.read()
+             if isinstance(content, bytes):
+                 return content.decode('utf-8')
+             return content
+     except Exception as e:
+         return f"An error occurred while reading the file: {str(e)}"
+ def respond(
+     message,
      history: List[Tuple[str, str]],
+     fashion_file,    # file-upload input
+     uhd_file,        # file-upload input
+     mixgen_file,     # file-upload input
+     parquet_file,    # file-upload input
+     system_message="",
+     max_tokens=1024,
+     temperature=0.7,
+     top_p=0.9,
  ):
+     system_prefix = """You must answer in Korean. Based on the given source code, your role is to explain how to use the service, guide users, and handle Q&A. Write very kindly and in detail, in Markdown format, using more than 4000 tokens. Ground the usage guidance and answers in the code and be helpful to the user; kindly cover whatever the user is likely to wonder about. Keep the code itself confidential, and never reveal key values, endpoints, or the specific model."""

+     if message.lower() == "run fashion code" and fashion_file is not None:
+         fashion_content = read_uploaded_file(fashion_file)
+         system_message += f"\n\nFashion code content:\n```python\n{fashion_content}\n```"
+         message = "Announce that you have learned the fashion virtual try-on material and are ready to explain it, and tell the user to try it out via the service URL (https://aiqcamp-fash.hf.space)."

+     elif message.lower() == "run uhd image code" and uhd_file is not None:
+         uhd_content = read_uploaded_file(uhd_file)
+         system_message += f"\n\nUHD image code content:\n```python\n{uhd_content}\n```"
+         message = "Announce that you have learned the UHD image generation material and are ready to explain it, and tell the user to try it out via the service URL (https://openfree-ultpixgen.hf.space)."

+     elif message.lower() == "run mixgen code" and mixgen_file is not None:
+         mixgen_content = read_uploaded_file(mixgen_file)
+         system_message += f"\n\nMixGEN code content:\n```python\n{mixgen_content}\n```"
+         message = "Announce that you have learned the MixGEN3 image generation material and are ready to explain it, and tell the user to try it out via the service URL (https://openfree-mixgen3.hf.space)."

+     elif message.lower() == "run test.parquet" and parquet_file is not None:
+         parquet_content = read_uploaded_file(parquet_file)
+         system_message += f"\n\ntest.parquet file content:\n```markdown\n{parquet_content}\n```"
+         message = "Announce that you have learned the test.parquet file and are ready to explain it and handle Q&A, and invite the user to ask questions."
+
+     messages = [{"role": "system", "content": f"{system_prefix} {system_message}"}]
+     for val in history:
+         if val[0]:
+             messages.append({"role": "user", "content": val[0]})
+         if val[1]:
+             messages.append({"role": "assistant", "content": val[1]})
      messages.append({"role": "user", "content": message})
+
      response = ""
      try:
+         for chunk in hf_client.chat_completion(
              messages,
              max_tokens=max_tokens,
              stream=True,
              temperature=temperature,
              top_p=top_p,
          ):
+             token = chunk.choices[0].delta.content
+             if token:
+                 response += token
+                 yield response
      except Exception as e:
+         yield f"An error occurred during inference: {str(e)}"
+
+ # Set up the Gradio interface
+ demo = gr.ChatInterface(
+     respond,
+     additional_inputs=[
+         gr.File(label="Fashion Code File", file_types=[".cod", ".txt", ".py"]),
+         gr.File(label="UHD Image Code File", file_types=[".cod", ".txt", ".py"]),
+         gr.File(label="MixGEN Code File", file_types=[".cod", ".txt", ".py"]),
+         gr.File(label="Parquet File", file_types=[".parquet"]),
+         gr.Textbox(label="System Message", value=""),
+         gr.Slider(minimum=1, maximum=8000, value=4000, label="Max Tokens"),
+         gr.Slider(minimum=0, maximum=1, value=0.7, label="Temperature"),
+         gr.Slider(minimum=0, maximum=1, value=0.9, label="Top P"),
+     ],
+     examples=[
+         ["Run fashion code"],
+         ["Run UHD image code"],
+         ["Run MixGEN code"],
+         ["Run test.parquet"],
          ["Describe the usage in detail, over 4000 tokens, as if you were explaining it while looking at the screen"],
          ["Write 20 FAQ entries in detail. Use more than 4000 tokens."],
          ["Write a YouTube video script of more than 4000 tokens focusing on usage, differentiators, features, and strengths"],
          ["Write this service up as an SEO-optimized blog post of more than 4000 tokens"],
+         ["Write it in the format of a patent application, covering the technical and business-model aspects to be used in a patent filing"],
          ["Continue the previous answer"],
+     ],
+     theme="Nymbo/Nymbo_Theme",
+     cache_examples=False,
+ )
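One wiring detail worth calling out: `gr.ChatInterface` passes each `additional_inputs` component's value positionally after `(message, history)`, so the component order above must match `respond()`'s parameter order exactly. A toy illustration (names invented, not part of this commit):

```python
# Toy illustration of how additional_inputs map positionally onto the
# handler's parameters after (message, history).
import gradio as gr

def echo(message, history, suffix):
    # `suffix` receives the value of the Textbox below.
    return f"{message}{suffix}"

toy = gr.ChatInterface(
    echo,
    additional_inputs=[gr.Textbox(label="Suffix", value="!")],
)
# toy.launch()
```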
  if __name__ == "__main__":
+     demo.launch()