zetavg committed · Commit 116804a
Parent: 320751d

update

Files changed:
- llama_lora/ui/finetune_ui.py   +23 -3
- llama_lora/ui/main_page.py     +21 -1
- llama_lora/ui/tokenizer_ui.py  +6 -6
- llama_lora/utils/prompter.py   +5 -2
llama_lora/ui/finetune_ui.py

@@ -334,15 +334,30 @@ Train data (first 10):
         time.sleep(2)
         return message
 
+    log_history = []
+
     class UiTrainerCallback(TrainerCallback):
         def _on_progress(self, args, state, control):
+            nonlocal log_history
+
             if Global.should_stop_training:
                 control.should_training_stop = True
             total_steps = (
                 state.max_steps if state.max_steps is not None else state.num_train_epochs * state.steps_per_epoch)
+            log_history = state.log_history
+            last_history = None
+            last_loss = None
+            if len(log_history) > 0:
+                last_history = log_history[-1]
+                last_loss = last_history.get('loss', None)
+
+            progress_detail = f"Epoch {math.ceil(state.epoch)}/{epochs}"
+            if last_loss is not None:
+                progress_detail += f", Loss: {last_loss:.4f}"
+
             progress(
                 (state.global_step, total_steps),
-                desc=f"Training... (
+                desc=f"Training... ({progress_detail})"
             )
 
         def on_epoch_begin(self, args, state, control, **kwargs):

@@ -382,7 +397,12 @@ Train data (first 10):
             None,  # resume_from_checkpoint
             training_callbacks  # callbacks
         )
-
+
+        logs_str = "\n".join([json.dumps(log) for log in log_history]) or "None"
+
+        result_message = f"Training ended:\n{str(results)}\n\nLogs:\n{logs_str}"
+        print(result_message)
+        return result_message
 
     except Exception as e:
         raise gr.Error(e)

@@ -582,7 +602,7 @@ def finetune_ui():
 
         with gr.Column():
             model_name = gr.Textbox(
-                lines=1, label="LoRA Model Name", value=random_name
+                lines=1, label="LoRA Model Name", value=random_name,
                 elem_id="finetune_model_name",
             )
 
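Note on the pattern above: UiTrainerCallback subclasses Hugging Face transformers' TrainerCallback, and state.log_history is the Trainer's running list of logged metric dicts. A minimal, self-contained sketch of the same progress-reporting idea outside Gradio (the callback name and print target are illustrative, not from this commit):

from transformers import TrainerCallback

class ProgressLoggerCallback(TrainerCallback):
    # Called by the Trainer each time metrics are logged (every `logging_steps`).
    def on_log(self, args, state, control, logs=None, **kwargs):
        # state.log_history is a list of dicts: training entries carry "loss",
        # evaluation entries carry "eval_loss", the final entry runtime stats.
        last_loss = None
        if len(state.log_history) > 0:
            last_loss = state.log_history[-1].get("loss", None)
        detail = f"step {state.global_step}"
        if last_loss is not None:
            detail += f", loss: {last_loss:.4f}"
        print(f"Training... ({detail})")

# After trainer.train() returns, the history can be serialized the same way
# the commit builds its result message:
#   logs_str = "\n".join(json.dumps(log) for log in trainer.state.log_history) or "None"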
llama_lora/ui/main_page.py

@@ -109,6 +109,12 @@ def main_page_custom_css():
     font-weight: 100;
 }
 
+/*
+.codemirror-wrapper .cm-editor .cm-gutters {
+    background-color: var(--background-fill-secondary);
+}
+*/
+
 .error-message, .error-message p {
     color: var(--error-text-color) !important;
 }

@@ -161,12 +167,14 @@ def main_page_custom_css():
 }
 
 .inference_options_group {
-    margin-top: -
+    margin-top: -16px;
+    margin-bottom: -16px;
 }
 .inference_options_group > .form {
     border-radius: 0;
     border-left: 0;
     border-right: 0;
+    border-bottom: 0;
     box-shadow: none;
 }
 
@@ -395,10 +403,22 @@ def main_page_custom_css():
     }
 }
 
+/*
 #tokenizer_encoded_tokens_input_textbox .codemirror-wrapper,
 #tokenizer_decoded_text_input_textbox .codemirror-wrapper {
     margin-bottom: -20px;
 }
+*/
+#tokenizer_encoded_tokens_input_textbox,
+#tokenizer_decoded_text_input_textbox {
+    overflow: hidden !important;
+}
+
+/* in case there are too many logs from the previous run making the box too high */
+#finetune_training_status:has(.wrap:not(.hide)) {
+    max-height: 160px;
+    height: 160px;
+}
 
 .foot_stop_timeoutable_btn {
     align-self: flex-end;
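For context, selectors like #finetune_training_status only apply because the stylesheet returned by main_page_custom_css() is passed into gr.Blocks(css=...) and components are tagged with matching elem_id values. A minimal sketch of that wiring (the component and id names here are illustrative, not from this commit):

import gradio as gr

# Cap the status box height while Gradio's loading overlay (.wrap) is shown,
# mirroring the #finetune_training_status rule added above.
custom_css = """
#training_status:has(.wrap:not(.hide)) {
    max-height: 160px;
    height: 160px;
}
"""

with gr.Blocks(css=custom_css) as demo:
    # elem_id makes the component addressable from the custom CSS.
    gr.Textbox(label="Training Status", elem_id="training_status")

demo.launch()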
llama_lora/ui/tokenizer_ui.py

@@ -66,12 +66,12 @@ def tokenizer_ui():
     things_that_might_timeout.append(decoding)
     things_that_might_timeout.append(encoding)
 
-
-
-
-
-
-
+    stop_timeoutable_btn = gr.Button(
+        "stop not-responding elements",
+        elem_id="inference_stop_timeoutable_btn",
+        elem_classes="foot_stop_timeoutable_btn")
+    stop_timeoutable_btn.click(
+        fn=None, inputs=None, outputs=None, cancels=things_that_might_timeout)
 
     tokenizer_ui_blocks.load(_js="""
     function tokenizer_ui_blocks_js() {
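The cancels= argument used here is Gradio's event-cancellation mechanism: clicking the button aborts any queued or in-flight events passed to it, which is why each long-running listener is first collected into things_that_might_timeout. A minimal sketch of the pattern (the task and labels are illustrative):

import time
import gradio as gr

def slow_task():
    time.sleep(60)  # stands in for a long-running inference call
    return "done"

with gr.Blocks() as demo:
    out = gr.Textbox(label="Result")
    run_btn = gr.Button("Run")
    stop_btn = gr.Button("stop not-responding elements")

    # Keep a handle on every event that might hang, as the commit does with
    # things_that_might_timeout.
    run_event = run_btn.click(fn=slow_task, inputs=None, outputs=out)

    # fn=None means this click does nothing except cancel the listed events.
    stop_btn.click(fn=None, inputs=None, outputs=None, cancels=[run_event])

# Event cancellation requires the request queue to be enabled.
demo.queue().launch()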
llama_lora/utils/prompter.py

@@ -48,7 +48,8 @@ class Prompter(object):
         elif "variables" in self.template:
             variable_names = self.template.get("variables")
             if type(variables) == dict:
-                variables = [variables.get(name, None) for name in variable_names]
+                variables = [variables.get(name, None)
+                             for name in variable_names]
             if "default" not in self.template:
                 raise ValueError(
                     f"The template {self.template_name} has \"variables\" defined but does not have a default prompt defined. Please define it like '\"default\": \"prompt_with_instruction\"' to handle cases when a matching prompt can't be found.")

@@ -91,7 +92,9 @@ class Prompter(object):
     def get_response(self, output: str) -> str:
         if self.template_name == "None":
             return output
-        return
+        return self.template["response_split"].join(
+            output.split(self.template["response_split"])[1:]
+        ).strip()
 
     def get_variable_names(self) -> List[str]:
         if self.template_name == "None":