MaxBlumenfeld
committed on
Commit
•
128a6b8
1
Parent(s):
3e99dbe
switched to shared tokenizer
Browse files
app.py
CHANGED
@@ -9,8 +9,9 @@ instruct_model_path = "MaxBlumenfeld/smollm2-135m-bootleg-instruct"
|
|
9 |
|
10 |
|
11 |
base_tokenizer = AutoTokenizer.from_pretrained(base_model_id)
|
12 |
-
# instruct_tokenizer = AutoTokenizer.from_pretrained(instruct_model_path
|
13 |
-
|
|
|
14 |
|
15 |
base_model = AutoModelForCausalLM.from_pretrained(base_model_id)
|
16 |
# instruct_model = AutoModelForCausalLM.from_pretrained(instruct_model_path, local_files_only=True)
|
@@ -68,7 +69,7 @@ def chat(message, temperature, max_length, system_prompt):
|
|
68 |
|
69 |
instruct_response = generate_response(
|
70 |
instruct_model,
|
71 |
-
|
72 |
message,
|
73 |
temperature,
|
74 |
max_length,
|
|
|
9 |
|
10 |
|
11 |
base_tokenizer = AutoTokenizer.from_pretrained(base_model_id)
|
12 |
+
# instruct_tokenizer = AutoTokenizer.from_pretrained(instruct_model_path)
|
13 |
+
|
14 |
+
|
15 |
|
16 |
base_model = AutoModelForCausalLM.from_pretrained(base_model_id)
|
17 |
# instruct_model = AutoModelForCausalLM.from_pretrained(instruct_model_path, local_files_only=True)
|
|
|
69 |
|
70 |
instruct_response = generate_response(
|
71 |
instruct_model,
|
72 |
+
base_tokenizer,
|
73 |
message,
|
74 |
temperature,
|
75 |
max_length,
|