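# Gradio chat app for Hugging Face Spaces: serves several Japanese GGUF models
# through llama-cpp-python / llama-cpp-agent, with the model, chat template,
# and sampling parameters selectable from the UI.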
import spaces
import random

import gradio as gr
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
from llama_cpp_agent.providers import LlamaCppPythonProvider
from llama_cpp_agent.chat_history import BasicChatHistory
from llama_cpp_agent.chat_history.messages import Roles
# Download the quantized GGUF weights for each selectable model from the Hugging Face Hub
hf_hub_download(
    repo_id="Aratako/Oumuamua-7b-RP-GGUF",
    filename="Oumuamua-7b-RP_Q4_K_M.gguf",
    local_dir="./models"
)
hf_hub_download(
    repo_id="bartowski/Oumuamua-7b-instruct-v2-GGUF",
    filename="Oumuamua-7b-instruct-v2-Q4_K_M.gguf",
    local_dir="./models"
)
hf_hub_download(
    repo_id="mmnga/umiyuki-Umievo-itr012-Gleipnir-7B-gguf",
    filename="umiyuki-Umievo-itr012-Gleipnir-7B-Q4_K_M.gguf",
    local_dir="./models"
)
hf_hub_download(
    repo_id="Local-Novel-LLM-project/Ninja-V3-GGUF",
    filename="Ninja-V3-Q4_K_M.gguf",
    local_dir="./models"
)
hf_hub_download(
    repo_id="Local-Novel-LLM-project/Kagemusya-7B-v1-GGUF",
    filename="kagemusya-7b-v1Q8_0.gguf",
    local_dir="./models"
)
hf_hub_download(
    repo_id="elyza/Llama-3-ELYZA-JP-8B-GGUF",
    filename="Llama-3-ELYZA-JP-8B-q4_k_m.gguf",
    local_dir="./models"
)
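# Cache the loaded model across requests; respond() only re-instantiates
# Llama when the user switches to a different GGUF file.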
llm = None
llm_model = None
# Load the dataset and inspect its available splits
dataset = load_dataset("elyza/ELYZA-tasks-100")
print(dataset)

# Pick the split to sample from: default to "train", fall back to "test"
split_name = "train" if "train" in dataset else "test"

# Draw 10 random tasks from the chosen split to use as clickable example prompts
examples_list = list(dataset[split_name])  # materialize the split as a list
examples = random.sample(examples_list, 10)  # sample 10 tasks at random
example_inputs = [[example['input']] for example in examples]  # nest for gr.ChatInterface examples
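# Streaming chat handler for gr.ChatInterface. Gradio passes the latest user
# message plus the (user, assistant) history; the remaining arguments come from
# the "Additional Inputs" widgets defined below and map onto llama.cpp sampling
# settings. Yields the accumulated response so the UI updates as tokens arrive.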
@spaces.GPU  # request a ZeroGPU slot per call; without this the `spaces` import is unused
def respond(
    message,
    history: list[tuple[str, str]],
    model,
    template,
    system_message,
    max_tokens,
    temperature,
    top_p,
    top_k,
    repeat_penalty,
):
    chat_template = MessagesFormatterType[template]

    global llm
    global llm_model

    if llm is None or llm_model != model:
        llm = Llama(
            model_path=f"models/{model}",
            flash_attn=True,
            n_gpu_layers=81,
            n_batch=1024,
            n_ctx=8192,
        )
        llm_model = model
    provider = LlamaCppPythonProvider(llm)

    agent = LlamaCppAgent(
        provider,
        system_prompt=f"{system_message}",
        predefined_messages_formatter_type=chat_template,
        debug_output=True
    )
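    # Copy the UI slider values onto the provider's sampling settings and
    # enable streaming so tokens are produced incrementally.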
    settings = provider.get_provider_default_settings()
    settings.temperature = temperature
    settings.top_k = top_k
    settings.top_p = top_p
    settings.max_tokens = max_tokens
    settings.repeat_penalty = repeat_penalty
    settings.stream = True
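    # Replay the prior turns from Gradio's history into the agent's chat
    # history as alternating user/assistant messages.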
    messages = BasicChatHistory()
    for msn in history:
        user = {
            'role': Roles.user,
            'content': msn[0]
        }
        assistant = {
            'role': Roles.assistant,
            'content': msn[1]
        }
        messages.add_message(user)
        messages.add_message(assistant)
    stream = agent.get_chat_response(
        message,
        llm_sampling_settings=settings,
        chat_history=messages,
        returns_streaming_generator=True,
        print_output=False
    )

    outputs = ""
    for output in stream:
        outputs += output
        yield outputs
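# A minimal sketch of driving respond() without the UI (hypothetical values;
# assumes the GGUF files above are already downloaded to ./models):
#
#   for partial in respond("Hello", [], "Oumuamua-7b-RP_Q4_K_M.gguf", "MISTRAL",
#                          "You are a helpful assistant.", 512, 0.7, 0.95, 40, 1.1):
#       print(partial)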
description = """<p align="center">★ Select the model and chat template you want to use from the Additional Inputs panel at the bottom of the screen. ★</p>
<p><center>
<a href="https://huggingface.co/Aratako/Oumuamua-7b-RP-GGUF" target="_blank">[Oumuamua-7b-RP Model]</a><br>
<a href="https://huggingface.co/bartowski/Oumuamua-7b-instruct-v2-GGUF" target="_blank">[Oumuamua-7b-instruct-v2 Model]</a><br>
<a href="https://huggingface.co/mmnga/umiyuki-Umievo-itr012-Gleipnir-7B-gguf" target="_blank">[Umievo-itr012-Gleipnir-7B Model]</a><br>
<a href="https://huggingface.co/Local-Novel-LLM-project/Ninja-V3-GGUF" target="_blank">[Ninja-V3 Model]</a><br>
<a href="https://huggingface.co/Local-Novel-LLM-project/Kagemusya-7B-v1-GGUF" target="_blank">[Kagemusya-7B-v1 Model]</a><br>
<a href="https://huggingface.co/elyza/Llama-3-ELYZA-JP-8B-GGUF" target="_blank">[Llama-3-ELYZA-JP-8B Model]</a>
</center></p>
"""
templates = [
    "MISTRAL", "CHATML", "VICUNA", "LLAMA_2", "SYNTHIA",
    "NEURAL_CHAT", "SOLAR", "OPEN_CHAT", "ALPACA", "CODE_DS",
    "B22", "LLAMA_3", "PHI_3"
]
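# Wire everything into a ChatInterface: the widgets in additional_inputs are
# passed to respond() positionally, in order, after (message, history).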
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Dropdown(
            [
                'Oumuamua-7b-RP_Q4_K_M.gguf',
                'Oumuamua-7b-instruct-v2-Q4_K_M.gguf',
                'umiyuki-Umievo-itr012-Gleipnir-7B-Q4_K_M.gguf',
                'Ninja-V3-Q4_K_M.gguf',
                'kagemusya-7b-v1Q8_0.gguf',
                'Llama-3-ELYZA-JP-8B-q4_k_m.gguf'
            ],
            value="Oumuamua-7b-RP_Q4_K_M.gguf",
            label="Model"
        ),
        gr.Dropdown(
            choices=templates,
            value="LLAMA_2",
            label="Template"
        ),
        gr.Textbox(value="You are a helpful assistant.", label="System message"),
        gr.Slider(minimum=1, maximum=4096, value=2048, step=1, label="Max tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
        gr.Slider(minimum=0, maximum=100, value=40, step=1, label="Top-k"),
        gr.Slider(minimum=0.0, maximum=2.0, value=1.1, step=0.1, label="Repetition penalty"),
    ],
    examples=example_inputs,
    cache_examples=False,
    retry_btn="Retry",
    undo_btn="Undo",
    clear_btn="Clear",
    submit_btn="Send",
    title="Chat with various models using llama.cpp",
    description=description,
    chatbot=gr.Chatbot(
        scale=1,
        likeable=False,
        show_copy_button=True
    )
)
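# Spaces executes this file directly, so launch the app from the main guard.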
if __name__ == "__main__":
    demo.launch()