Update app.py
app.py
CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 import openai
 import anthropic
+import os
 from typing import Optional
 
 #############################
@@ -21,11 +22,12 @@ MODELS = {
 # Define the Cohere Command R+ model ID
 COHERE_MODEL = "CohereForAI/c4ai-command-r-plus-08-2024"
 
-def get_client(model_name, hf_token):
+def get_client(model_name):
     """
     Create an InferenceClient for the given model name.
-
+    The token is read from an environment variable.
     """
+    hf_token = os.getenv("HF_TOKEN")
     if not hf_token:
         raise ValueError("A HuggingFace API token is required.")
 
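The hunk above is the heart of this commit: get_client no longer receives the token from its callers and instead reads HF_TOKEN from the process environment, failing fast when it is unset. A minimal sketch of the same pattern in isolation (the helper name and error text are illustrative, not taken from app.py):

import os
from huggingface_hub import InferenceClient

def make_client(model_id: str) -> InferenceClient:
    # Read the HuggingFace token from the environment; fail fast if missing.
    token = os.getenv("HF_TOKEN")
    if not token:
        raise ValueError("A HuggingFace API token is required (set HF_TOKEN).")
    return InferenceClient(model=model_id, token=token)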
@@ -44,14 +46,13 @@ def respond_hf_qna(
     max_tokens: int,
     temperature: float,
     top_p: float,
-    system_message: str,
-    hf_token: str
+    system_message: str
 ):
     """
     Return the answer to a single question using a HuggingFace model (e.g. Zephyr).
     """
     try:
-        client = get_client(model_name, hf_token)
+        client = get_client(model_name)
     except ValueError as e:
         return f"Error: {str(e)}"
 
@@ -80,15 +81,14 @@ def respond_cohere_qna(
     system_message: str,
     max_tokens: int,
     temperature: float,
-    top_p: float,
-    hf_token: str
+    top_p: float
 ):
     """
     Return the answer to a single question using the Cohere Command R+ model.
     """
     model_name = "Cohere Command R+"
     try:
-        client = get_client(model_name, hf_token)
+        client = get_client(model_name)
     except ValueError as e:
         return f"Error: {str(e)}"
 
@@ -115,12 +115,12 @@ def respond_chatgpt_qna(
     system_message: str,
     max_tokens: int,
     temperature: float,
-    top_p: float,
-    openai_token: str
+    top_p: float
 ):
     """
     Return the answer to a single question using the ChatGPT (OpenAI) model.
     """
+    openai_token = os.getenv("OPENAI_TOKEN")
     if not openai_token:
         return "An OpenAI API token is required."
 
@@ -150,12 +150,12 @@ def respond_deepseek_qna(
     system_message: str,
     max_tokens: int,
     temperature: float,
-    top_p: float,
-    deepseek_token: str
+    top_p: float
 ):
     """
     Return the answer to a single question using the DeepSeek model.
     """
+    deepseek_token = os.getenv("DEEPSEEK_TOKEN")
     if not deepseek_token:
         return "A DeepSeek API token is required."
 
@@ -186,12 +186,12 @@ def respond_claude_qna(
     system_message: str,
     max_tokens: int,
     temperature: float,
-    top_p: float,
-    claude_api_key: str
+    top_p: float
 ) -> str:
     """
     Improved response-generation function using the Claude API.
     """
+    claude_api_key = os.getenv("CLAUDE_TOKEN")
     if not claude_api_key:
         return "A Claude API token is required."
 
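The same read-and-guard step now repeats once per provider (HF_TOKEN, OPENAI_TOKEN, DEEPSEEK_TOKEN, CLAUDE_TOKEN). It could be collapsed into one shared helper; the sketch below is a suggestion, and require_token is a hypothetical name that this commit does not define:

import os

def require_token(env_var: str, provider: str) -> str:
    # Fetch a provider token from the environment or raise a uniform error.
    token = os.getenv(env_var)
    if not token:
        raise ValueError(f"A {provider} API token is required (set {env_var}).")
    return token

# Hypothetical usage: openai_token = require_token("OPENAI_TOKEN", "OpenAI")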
@@ -229,30 +229,6 @@ def respond_claude_qna(
 with gr.Blocks() as demo:
     gr.Markdown("# LLM Playground")
 
-    # Place the four token textboxes in a single row
-    with gr.Row():
-        hf_token_box = gr.Textbox(
-            label="HuggingFace token",
-            type="password",
-            placeholder="Enter your HuggingFace API token..."
-        )
-        openai_token_box = gr.Textbox(
-            label="OpenAI token",
-            type="password",
-            placeholder="Enter your OpenAI API token..."
-        )
-        claude_token_box = gr.Textbox(
-            label="Claude token",
-            type="password",
-            placeholder="Enter your Claude API token...",
-            show_copy_button=False
-        )
-        deepseek_token_box = gr.Textbox(
-            label="DeepSeek token",
-            type="password",
-            placeholder="Enter your DeepSeek API token..."
-        )
-
     #################
     # General model tab
     #################
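With the password textboxes gone, the app no longer accepts tokens through the UI at all. On a Hugging Face Space the natural replacement is repository secrets with the same names; for a local run you can seed the environment before the Blocks are built. A sketch for local testing only (placeholder values, obviously not real tokens):

import os

# Local testing only: provide the four secrets this app now reads.
# On a Space, configure these as repository secrets instead.
os.environ.setdefault("HF_TOKEN", "hf_...")
os.environ.setdefault("OPENAI_TOKEN", "sk-...")
os.environ.setdefault("CLAUDE_TOKEN", "sk-ant-...")
os.environ.setdefault("DEEPSEEK_TOKEN", "...")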
@@ -290,7 +266,7 @@ with gr.Blocks() as demo:
 
         submit_button = gr.Button("Submit")
 
-        def merge_and_call_hf(i1, i2, i3, i4, i5, m_name, mt, temp, top_p_, sys_msg, hf_token):
+        def merge_and_call_hf(i1, i2, i3, i4, i5, m_name, mt, temp, top_p_, sys_msg):
             # Join inputs 1-5 with spaces to build the question
             question = " ".join([i1, i2, i3, i4, i5])
             return respond_hf_qna(
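A side note on the join in this handler: " ".join([i1, i2, i3, i4, i5]) keeps empty fields, so unused inputs leave doubled spaces in the question. A filtered join avoids that; merge_inputs below is a suggested variant, not part of the commit:

def merge_inputs(*parts: str) -> str:
    # Join only the non-empty fields, trimming stray whitespace.
    return " ".join(p.strip() for p in parts if p and p.strip())

# merge_inputs("What is", "", "Gradio?") -> "What is Gradio?"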
@@ -299,8 +275,7 @@ with gr.Blocks() as demo:
                 max_tokens=mt,
                 temperature=temp,
                 top_p=top_p_,
-                system_message=sys_msg,
-                hf_token=hf_token
+                system_message=sys_msg
             )
 
         submit_button.click(
@@ -311,8 +286,7 @@ with gr.Blocks() as demo:
                 max_tokens,
                 temperature,
                 top_p,
-                system_message,
-                hf_token_box
+                system_message
             ],
             outputs=answer_output
         )
@@ -344,15 +318,14 @@ with gr.Blocks() as demo:
 
         cohere_submit_button = gr.Button("Submit")
 
-        def merge_and_call_cohere(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, hf_token):
+        def merge_and_call_cohere(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_):
             question = " ".join([i1, i2, i3, i4, i5])
             return respond_cohere_qna(
                 question=question,
                 system_message=sys_msg,
                 max_tokens=mt,
                 temperature=temp,
-                top_p=top_p_,
-                hf_token=hf_token
+                top_p=top_p_
             )
 
         cohere_submit_button.click(
@@ -362,8 +335,7 @@ with gr.Blocks() as demo:
                 cohere_system_message,
                 cohere_max_tokens,
                 cohere_temperature,
-                cohere_top_p,
-                hf_token_box
+                cohere_top_p
             ],
             outputs=cohere_answer_output
         )
@@ -395,15 +367,14 @@ with gr.Blocks() as demo:
 
         chatgpt_submit_button = gr.Button("Submit")
 
-        def merge_and_call_chatgpt(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, openai_token):
+        def merge_and_call_chatgpt(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_):
             question = " ".join([i1, i2, i3, i4, i5])
             return respond_chatgpt_qna(
                 question=question,
                 system_message=sys_msg,
                 max_tokens=mt,
                 temperature=temp,
-                top_p=top_p_,
-                openai_token=openai_token
+                top_p=top_p_
             )
 
         chatgpt_submit_button.click(
@@ -413,8 +384,7 @@ with gr.Blocks() as demo:
                 chatgpt_system_message,
                 chatgpt_max_tokens,
                 chatgpt_temperature,
-                chatgpt_top_p,
-                openai_token_box
+                chatgpt_top_p
             ],
             outputs=chatgpt_answer_output
         )
@@ -463,15 +433,14 @@ with gr.Blocks() as demo:
 
         claude_submit_button = gr.Button("Submit")
 
-        def merge_and_call_claude(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, claude_key):
+        def merge_and_call_claude(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_):
             question = " ".join([i1, i2, i3, i4, i5])
             return respond_claude_qna(
                 question=question,
                 system_message=sys_msg,
                 max_tokens=mt,
                 temperature=temp,
-                top_p=top_p_,
-                claude_api_key=claude_key
+                top_p=top_p_
             )
 
         claude_submit_button.click(
@@ -481,8 +450,7 @@ with gr.Blocks() as demo:
                 claude_system_message,
                 claude_max_tokens,
                 claude_temperature,
-                claude_top_p,
-                claude_token_box
+                claude_top_p
             ],
             outputs=claude_answer_output
         )
@@ -514,15 +482,14 @@ with gr.Blocks() as demo:
 
         deepseek_submit_button = gr.Button("Submit")
 
-        def merge_and_call_deepseek(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, deepseek_token):
+        def merge_and_call_deepseek(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_):
             question = " ".join([i1, i2, i3, i4, i5])
             return respond_deepseek_qna(
                 question=question,
                 system_message=sys_msg,
                 max_tokens=mt,
                 temperature=temp,
-                top_p=top_p_,
-                deepseek_token=deepseek_token
+                top_p=top_p_
             )
 
         deepseek_submit_button.click(
@@ -532,8 +499,7 @@ with gr.Blocks() as demo:
                 deepseek_system_message,
                 deepseek_max_tokens,
                 deepseek_temperature,
-                deepseek_top_p,
-                deepseek_token_box
+                deepseek_top_p
             ],
             outputs=deepseek_answer_output
         )
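Every tab repeats the same three-part edit: the token argument leaves the merge_and_call_* signature, the respond_* call, and the click() inputs list, which keeps handler arity and wiring in step. A small import-time guard can assert that invariant; check_arity is purely illustrative and not in the commit:

import inspect

def check_arity(fn, n_inputs: int) -> None:
    # A handler must accept exactly as many arguments as click() passes it.
    n_params = len(inspect.signature(fn).parameters)
    assert n_params == n_inputs, f"{fn.__name__} takes {n_params} args but {n_inputs} are wired"

# Hypothetical usage: check_arity(merge_and_call_hf, 10)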