Spaces:
Running
on
Zero
Running
on
Zero
tori29umai
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -60,43 +60,44 @@ def load_settings_from_ini(filename='character_settings.ini'):
|
|
60 |
|
61 |
# LlamaCppのラッパークラス
|
62 |
class LlamaCppAdapter:
|
63 |
-
@spaces.GPU(duration=120)
|
64 |
def __init__(self, model_path, n_ctx=4096):
|
65 |
-
|
|
|
66 |
self.llama = None
|
67 |
-
try:
|
68 |
-
self.llama = Llama(model_path=model_path, n_ctx=n_ctx, n_gpu_layers=0)
|
69 |
-
except Exception as e:
|
70 |
-
print(f"モデルの初期化中にエラーが発生しました: {e}")
|
71 |
|
72 |
-
|
|
|
73 |
if self.llama is None:
|
74 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
75 |
try:
|
76 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
77 |
except Exception as e:
|
78 |
print(f"生成中にエラーが発生しました: {e}")
|
79 |
return {"choices": [{"text": "生成中にエラーが発生しました。"}]}
|
80 |
|
81 |
-
def
|
82 |
-
return self.
|
83 |
-
prompt,
|
84 |
-
temperature=temperature,
|
85 |
-
max_tokens=max_new_tokens,
|
86 |
-
top_p=top_p,
|
87 |
-
top_k=top_k,
|
88 |
-
stop=stop,
|
89 |
-
repeat_penalty=1.2,
|
90 |
-
)
|
91 |
|
92 |
# キャラクターメーカークラス
|
93 |
class CharacterMaker:
|
94 |
def __init__(self):
|
95 |
-
self.llama =
|
96 |
-
try:
|
97 |
-
self.llama = LlamaCppAdapter(model_path)
|
98 |
-
except Exception as e:
|
99 |
-
print(f"LlamaCppAdapter の初期化中にエラーが発生しました: {e}")
|
100 |
self.history = []
|
101 |
self.settings = load_settings_from_ini()
|
102 |
if not self.settings:
|
@@ -129,15 +130,15 @@ class CharacterMaker:
|
|
129 |
save_settings_to_ini(self.settings)
|
130 |
|
131 |
def make(self, input_str: str):
|
132 |
-
if self.llama is None:
|
133 |
-
return "申し訳ありません。モデルが正しく初期化されていません。"
|
134 |
-
|
135 |
prompt = self._generate_aki(input_str)
|
136 |
print(prompt)
|
137 |
print("-----------------")
|
138 |
try:
|
139 |
res = self.llama.generate(prompt, max_new_tokens=1000, stop=["<END>", "\n"])
|
140 |
-
|
|
|
|
|
|
|
141 |
except Exception as e:
|
142 |
print(f"生成中にエラーが発生しました: {e}")
|
143 |
res_text = "申し訳ありません。応答の生成中にエラーが発生しました。"
|
@@ -170,11 +171,7 @@ class CharacterMaker:
|
|
170 |
|
171 |
def reset(self):
|
172 |
self.history = []
|
173 |
-
|
174 |
-
self.llama = LlamaCppAdapter(model_path)
|
175 |
-
except Exception as e:
|
176 |
-
print(f"LlamaCppAdapter の再初期化中にエラーが発生しました: {e}")
|
177 |
-
self.llama = None
|
178 |
|
179 |
character_maker = CharacterMaker()
|
180 |
|
|
|
60 |
|
61 |
# LlamaCppのラッパークラス
|
62 |
class LlamaCppAdapter:
    """Thin wrapper around llama-cpp-python.

    Model construction is deliberately deferred out of __init__ and into
    initialize_and_generate, which carries the @spaces.GPU decorator, so the
    heavy Llama() load happens inside the ZeroGPU-allocated context.
    """

    def __init__(self, model_path, n_ctx=4096):
        # Store configuration only; no GPU work happens here.
        self.model_path = model_path
        self.n_ctx = n_ctx
        self.llama = None  # created lazily on first generate call

    @spaces.GPU(duration=120)
    def initialize_and_generate(self, prompt, max_new_tokens=4096,
                                temperature=0.5, top_p=0.7, top_k=80,
                                stop=None):
        """Lazily initialize the model if needed, then run one completion.

        Returns the llama-cpp response dict. On any failure, returns a dict
        with the same {"choices": [{"text": ...}]} shape so callers never
        need a special error path.
        """
        # BUG FIX: the default was the mutable literal stop=["<END>"], which
        # is shared across all calls (and across both methods). Use the None
        # sentinel idiom instead; behavior for callers is unchanged.
        if stop is None:
            stop = ["<END>"]
        if self.llama is None:
            print(f"モデルの初期化: {self.model_path}")
            try:
                # n_gpu_layers=-1 offloads all layers to the GPU.
                self.llama = Llama(model_path=self.model_path,
                                   n_ctx=self.n_ctx, n_gpu_layers=-1)
                print("モデルの初期化が成功しました。")
            except Exception as e:
                print(f"モデルの初期化中にエラーが発生しました: {e}")
                return {"choices": [{"text": "モデルの初期化に失敗しました。"}]}
        try:
            result = self.llama(
                prompt,
                temperature=temperature,
                max_tokens=max_new_tokens,
                top_p=top_p,
                top_k=top_k,
                stop=stop,
                repeat_penalty=1.2,
            )
            return result
        except Exception as e:
            print(f"生成中にエラーが発生しました: {e}")
            return {"choices": [{"text": "生成中にエラーが発生しました。"}]}

    def generate(self, prompt, max_new_tokens=4096, temperature=0.5,
                 top_p=0.7, top_k=80, stop=None):
        """Public entry point; delegates to the GPU-decorated worker."""
        return self.initialize_and_generate(prompt, max_new_tokens,
                                            temperature, top_p, top_k, stop)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
96 |
|
97 |
# キャラクターメーカークラス
|
98 |
class CharacterMaker:
|
99 |
def __init__(self):
|
100 |
+
self.llama = LlamaCppAdapter(model_path)
|
|
|
|
|
|
|
|
|
101 |
self.history = []
|
102 |
self.settings = load_settings_from_ini()
|
103 |
if not self.settings:
|
|
|
130 |
save_settings_to_ini(self.settings)
|
131 |
|
132 |
def make(self, input_str: str):
|
|
|
|
|
|
|
133 |
prompt = self._generate_aki(input_str)
|
134 |
print(prompt)
|
135 |
print("-----------------")
|
136 |
try:
|
137 |
res = self.llama.generate(prompt, max_new_tokens=1000, stop=["<END>", "\n"])
|
138 |
+
if isinstance(res, dict) and "choices" in res and len(res["choices"]) > 0:
|
139 |
+
res_text = res["choices"][0]["text"]
|
140 |
+
else:
|
141 |
+
res_text = "応答の生成に失敗しました。"
|
142 |
except Exception as e:
|
143 |
print(f"生成中にエラーが発生しました: {e}")
|
144 |
res_text = "申し訳ありません。応答の生成中にエラーが発生しました。"
|
|
|
171 |
|
172 |
def reset(self):
|
173 |
self.history = []
|
174 |
+
self.llama = LlamaCppAdapter(model_path)
|
|
|
|
|
|
|
|
|
175 |
|
176 |
character_maker = CharacterMaker()
|
177 |
|