import torch
import gradio as gr
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
import os
from threading import Thread
import random
from datasets import load_dataset
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
from typing import List, Tuple
import json
from datetime import datetime
# Free any cached GPU memory before loading the model
torch.cuda.empty_cache()

# Environment variables
HF_TOKEN = os.environ.get("HF_TOKEN", None)
MODEL_ID = "CohereForAI/c4ai-command-r7b-12-2024"
MODELS = os.environ.get("MODELS")
MODEL_NAME = MODEL_ID.split("/")[-1]
# Load the model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
# Load the Korean Wikipedia QnA dataset
wiki_dataset = load_dataset("lcw99/wikipedia-korean-20240501-1million-qna")
print("Wikipedia dataset loaded:", wiki_dataset)

# Initialize and fit the TF-IDF vectorizer
print("Starting TF-IDF vectorization...")
questions = wiki_dataset['train']['question'][:10000]  # use only the first 10,000 questions
vectorizer = TfidfVectorizer(max_features=1000)
question_vectors = vectorizer.fit_transform(questions)
print("TF-IDF vectorization complete")
class ChatHistory:
    def __init__(self):
        self.history = []
        self.history_file = "/tmp/chat_history.json"
        self.load_history()

    def add_conversation(self, user_msg: str, assistant_msg: str):
        conversation = {
            "timestamp": datetime.now().isoformat(),
            "messages": [
                {"role": "user", "content": user_msg},
                {"role": "assistant", "content": assistant_msg}
            ]
        }
        self.history.append(conversation)
        self.save_history()

    def format_for_display(self):
        formatted = []
        for conv in self.history:
            formatted.append([
                conv["messages"][0]["content"],
                conv["messages"][1]["content"]
            ])
        return formatted

    def get_messages_for_api(self):
        messages = []
        for conv in self.history:
            messages.extend([
                {"role": "user", "content": conv["messages"][0]["content"]},
                {"role": "assistant", "content": conv["messages"][1]["content"]}
            ])
        return messages

    def clear_history(self):
        self.history = []
        self.save_history()

    def save_history(self):
        try:
            with open(self.history_file, 'w', encoding='utf-8') as f:
                json.dump(self.history, f, ensure_ascii=False, indent=2)
        except Exception as e:
            print(f"Failed to save history: {e}")

    def load_history(self):
        try:
            if os.path.exists(self.history_file):
                with open(self.history_file, 'r', encoding='utf-8') as f:
                    self.history = json.load(f)
        except Exception as e:
            print(f"Failed to load history: {e}")
            self.history = []
# Create a global ChatHistory instance
chat_history = ChatHistory()
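
# For reference, each conversation persisted to /tmp/chat_history.json has this
# shape (the values below are illustrative):
#
#   {
#       "timestamp": "2024-12-01T12:00:00",
#       "messages": [
#           {"role": "user", "content": "..."},
#           {"role": "assistant", "content": "..."}
#       ]
#   }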
def find_relevant_context(query, top_k=3):
    # Vectorize the query with the fitted TF-IDF vectorizer
    query_vector = vectorizer.transform([query])
    # Compute cosine similarity (the TF-IDF rows are l2-normalized,
    # so a dot product is a cosine similarity)
    similarities = (query_vector * question_vectors.T).toarray()[0]
    # Indices of the most similar questions, best match first
    top_indices = np.argsort(similarities)[-top_k:][::-1]
    # Collect the matching question/answer pairs
    relevant_contexts = []
    for idx in top_indices:
        if similarities[idx] > 0:
            relevant_contexts.append({
                'question': questions[idx],
                'answer': wiki_dataset['train']['answer'][idx],
                'similarity': similarities[idx]
            })
    return relevant_contexts
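
# Illustrative usage (the query and scores are hypothetical):
#
#   ctxs = find_relevant_context("example query", top_k=3)
#   # -> [{'question': '...', 'answer': '...', 'similarity': 0.42}, ...]
#   # Zero-similarity matches are filtered out, so fewer than top_k entries
#   # may come back.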
def analyze_file_content(content, file_type):
    """Analyze file content and return a structural summary."""
    if file_type in ['parquet', 'csv']:
        try:
            # The content is a markdown table; count columns from the header row
            lines = content.split('\n')
            header = lines[0]
            columns = header.count('|') - 1
            rows = len(lines) - 3  # subtract header, separator, and trailing line
            return f"📊 Dataset structure: {columns} columns, {rows} rows"
        except:
            return "❌ Failed to analyze dataset structure"
    lines = content.split('\n')
    total_lines = len(lines)
    non_empty_lines = len([line for line in lines if line.strip()])
    if any(keyword in content.lower() for keyword in ['def ', 'class ', 'import ', 'function']):
        functions = len([line for line in lines if 'def ' in line])
        classes = len([line for line in lines if 'class ' in line])
        imports = len([line for line in lines if 'import ' in line or 'from ' in line])
        return f"💻 Code structure: {total_lines} lines (functions: {functions}, classes: {classes}, imports: {imports})"
    paragraphs = content.count('\n\n') + 1
    words = len(content.split())
    return f"📝 Document structure: {total_lines} lines, {paragraphs} paragraphs, ~{words} words"
def read_uploaded_file(file):
    if file is None:
        return "", ""
    try:
        file_ext = os.path.splitext(file.name)[1].lower()
        if file_ext == '.parquet':
            df = pd.read_parquet(file.name, engine='pyarrow')
            content = df.head(10).to_markdown(index=False)
            return content, "parquet"
        elif file_ext == '.csv':
            encodings = ['utf-8', 'cp949', 'euc-kr', 'latin1']
            for encoding in encodings:
                try:
                    df = pd.read_csv(file.name, encoding=encoding)
                    content = f"📊 Data preview:\n{df.head(10).to_markdown(index=False)}\n\n"
                    content += f"\n📈 Data info:\n"
                    content += f"- Total rows: {len(df)}\n"
                    content += f"- Total columns: {len(df.columns)}\n"
                    content += f"- Columns: {', '.join(df.columns)}\n"
                    content += f"\n📋 Column data types:\n"
                    for col, dtype in df.dtypes.items():
                        content += f"- {col}: {dtype}\n"
                    null_counts = df.isnull().sum()
                    if null_counts.any():
                        content += f"\n⚠️ Missing values:\n"
                        for col, null_count in null_counts[null_counts > 0].items():
                            content += f"- {col}: {null_count} missing\n"
                    return content, "csv"
                except UnicodeDecodeError:
                    continue
            # UnicodeDecodeError requires five positional arguments, so raising it
            # with a single message would itself fail; use ValueError instead
            raise ValueError(f"❌ Could not read the file with any supported encoding ({', '.join(encodings)})")
        else:
            encodings = ['utf-8', 'cp949', 'euc-kr', 'latin1']
            for encoding in encodings:
                try:
                    with open(file.name, 'r', encoding=encoding) as f:
                        content = f.read()
                    return content, "text"
                except UnicodeDecodeError:
                    continue
            raise ValueError(f"❌ Could not read the file with any supported encoding ({', '.join(encodings)})")
    except Exception as e:
        return f"❌ File read error: {str(e)}", "error"
@spaces.GPU  # required so ZeroGPU allocates a GPU for the duration of the call
def stream_chat(message: str, history: list, uploaded_file, temperature: float, max_new_tokens: int, top_p: float, top_k: int, penalty: float):
    print(f'message is - {message}')
    print(f'history is - {history}')

    # Read the uploaded file, if any, and wrap its contents for the prompt
    file_context = ""
    if uploaded_file:
        content, file_type = read_uploaded_file(uploaded_file)
        if content:
            file_context = f"\n\nUploaded file contents:\n```\n{content}\n```"

    # Retrieve related Wikipedia QnA pairs for the query
    relevant_contexts = find_relevant_context(message)
    wiki_context = "\n\nRelated Wikipedia information:\n"
    for ctx in relevant_contexts:
        wiki_context += f"Q: {ctx['question']}\nA: {ctx['answer']}\nSimilarity: {ctx['similarity']:.3f}\n\n"

    # Rebuild the conversation history in chat-message format
    conversation = []
    for prompt, answer in history:
        conversation.extend([
            {"role": "user", "content": prompt},
            {"role": "assistant", "content": answer}
        ])

    # Compose the final prompt: file contents + retrieved context + current question
    final_message = file_context + wiki_context + "\nCurrent question: " + message
    conversation.append({"role": "user", "content": final_message})

    input_text = tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)

    generate_kwargs = dict(
        inputs,
        streamer=streamer,
        top_k=top_k,
        top_p=top_p,
        repetition_penalty=penalty,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        eos_token_id=[255001],  # end-of-turn token id for the Command R tokenizer
    )

    # Generate on a background thread so tokens can be streamed as they arrive
    thread = Thread(target=model.generate, kwargs=generate_kwargs)
    thread.start()

    # The event handlers expect (textbox, chatbot) outputs, so yield an empty
    # textbox plus the history extended with the partial response
    buffer = ""
    for new_text in streamer:
        buffer += new_text
        yield "", history + [[message, buffer]]

    # Persist the finished exchange
    chat_history.add_conversation(message, buffer)
CSS = """
/* Page-wide styling */
body {
    background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
    min-height: 100vh;
    font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
}
/* ... (remaining CSS styles unchanged) ... */
"""
with gr.Blocks(css=CSS) as demo:
    with gr.Row():
        with gr.Column(scale=2):
            chatbot = gr.Chatbot(height=500)
            msg = gr.Textbox(
                label="Message",
                show_label=False,
                placeholder="Ask me anything... 💭",
                container=False
            )
            with gr.Row():
                clear = gr.ClearButton([msg, chatbot], value="Clear conversation")
                send = gr.Button("Send 📤")
        with gr.Column(scale=1):
            gr.Markdown("### File Upload 📁")
            file_upload = gr.File(
                label="Select a file",
                file_types=["text", ".csv", ".parquet"],
                type="filepath"
            )
            with gr.Accordion("Advanced Settings ⚙️", open=False):
                temperature = gr.Slider(
                    minimum=0,
                    maximum=1,
                    step=0.1,
                    value=0.8,
                    label="Temperature",
                )
                max_new_tokens = gr.Slider(
                    minimum=128,
                    maximum=8000,
                    step=1,
                    value=4000,
                    label="Max new tokens",
                )
                top_p = gr.Slider(
                    minimum=0.0,
                    maximum=1.0,
                    step=0.1,
                    value=0.8,
                    label="Top-p",
                )
                top_k = gr.Slider(
                    minimum=1,
                    maximum=20,
                    step=1,
                    value=20,
                    label="Top-k",
                )
                penalty = gr.Slider(
                    minimum=0.0,
                    maximum=2.0,
                    step=0.1,
                    value=1.0,
                    label="Repetition penalty",
                )
    # Example prompts (translated from the original Korean)
    gr.Examples(
        examples=[
            ["Explain Korea's traditional seasonal calendar and the 24 solar terms in detail."],
            ["Recommend five healthy traditional Korean fermented foods and explain their benefits."],
            ["Introduce Korea's most famous mountains, with each mountain's highlights and recommended hiking courses."],
            ["Explain the instruments and rhythms of samulnori in a way a beginner can understand."],
            ["Analyze the scientific principles behind traditional Korean architecture from a modern perspective."],
            ["Explain the Joseon-era civil service examination system by comparing it with modern examination systems."],
            ["Compare Korea's four great palaces and explain each one's features and historical significance."],
            ["Reinterpret traditional Korean games for the modern day and suggest ways to play them indoors."],
            ["Explain in detail how Hangul was created and the scientific principles behind Hunminjeongeum."],
            ["Describe Korea's traditional tea culture and recommend a traditional tea for each season."],
            ["Analyze the structure and features of hanbok, Korea's traditional dress, from scientific and aesthetic perspectives."],
            ["Analyze the structure of traditional Korean houses in terms of climate and environment, and suggest elements that could be applied to modern architecture."]
        ],
        inputs=msg,
    )
    # Event bindings
    msg.submit(
        stream_chat,
        inputs=[msg, chatbot, file_upload, temperature, max_new_tokens, top_p, top_k, penalty],
        outputs=[msg, chatbot]
    )

    send.click(
        stream_chat,
        inputs=[msg, chatbot, file_upload, temperature, max_new_tokens, top_p, top_k, penalty],
        outputs=[msg, chatbot]
    )

    # Automatically analyze a file when it is uploaded
    file_upload.change(
        lambda: "Starting file analysis...",
        outputs=msg
    ).then(
        stream_chat,
        inputs=[msg, chatbot, file_upload, temperature, max_new_tokens, top_p, top_k, penalty],
        outputs=[msg, chatbot]
    )
if __name__ == "__main__":
    demo.launch()