# pecore/presets.py — model presets and code-snippet generation for the PECoRe
# demo Space. (File-viewer header removed; original upload by gsarti,
# commit 8b6d1b6, 8.11 kB.)
import json
# Default system prompt injected into the chat-style presets below.
# Fix: "that provide" -> "that provides" (subject-verb agreement).
SYSTEM_PROMPT = "You are a helpful assistant that provides concise and accurate answers."
def set_cora_preset():
    """Return the preset settings for the CoRA multilingual QA generator.

    Returns a ``(model_name_or_path, input_template,
    input_current_text_template)`` tuple.
    """
    model_id = "gsarti/cora_mgen"
    contextual_template = "<Q>:{current} <P>:{context}"
    contextless_template = "<Q>:{current}"
    return (model_id, contextual_template, contextless_template)
def set_default_preset():
    """Return the default preset: GPT-2 with plain templates and empty kwargs.

    Returns an 11-tuple: model name, the four templates, special tokens to
    keep, the decoder input/output separator, and the four JSON kwargs strings.
    """
    # All four kwargs slots start out as empty JSON objects.
    empty_json = "{}"
    return (
        "gpt2",  # model_name_or_path
        "{current} {context}",  # input_template
        "{current}",  # output_template
        "{current}",  # contextless_input_template
        "{current}",  # contextless_output_template
        [],  # special_tokens_to_keep
        "",  # decoder_input_output_separator
        empty_json,  # model_kwargs
        empty_json,  # tokenizer_kwargs
        empty_json,  # generation_kwargs
        empty_json,  # attribution_kwargs
    )
def set_zephyr_preset():
    """Return the preset settings for the StableLM 2 Zephyr chat model.

    Fix: ``{context}`` and ``{current}`` are escaped as ``{{...}}`` so that
    ``.format(system_prompt=...)`` only fills in the system prompt and leaves
    them as literal placeholders for later substitution. Unescaped, ``str.format``
    raises ``KeyError: 'context'`` as soon as this function is called.
    """
    return (
        "stabilityai/stablelm-2-zephyr-1_6b",  # model_name_or_path
        "<|system|>{system_prompt}<|endoftext|>\n<|user|>\n{{context}}\n\n{{current}}<|endoftext|>\n<|assistant|>".format(
            system_prompt=SYSTEM_PROMPT
        ),  # input_template
        "<|system|>{system_prompt}<|endoftext|>\n<|user|>\n{{current}}<|endoftext|>\n<|assistant|>".format(
            system_prompt=SYSTEM_PROMPT
        ),  # input_current_text_template
        "\n",  # decoder_input_output_separator
        ["<|im_start|>", "<|im_end|>", "<|endoftext|>"],  # special_tokens_to_keep
    )
def set_chatml_preset():
    """Return the preset settings for the Qwen1.5 chat model (ChatML format).

    Fix: ``{context}`` and ``{current}`` are escaped as ``{{...}}`` so that
    ``.format(system_prompt=...)`` only fills in the system prompt and leaves
    them as literal placeholders. Unescaped, ``str.format`` raises
    ``KeyError`` on the unknown fields the moment this function is called.
    """
    return (
        "Qwen/Qwen1.5-0.5B-Chat",  # model_name_or_path
        "<|im_start|>system\n{system_prompt}<|im_end|>\n<|im_start|>user\n{{context}}\n\n{{current}}<|im_end|>\n<|im_start|>assistant".format(
            system_prompt=SYSTEM_PROMPT
        ),  # input_template
        "<|im_start|>system\n{system_prompt}<|im_end|>\n<|im_start|>user\n{{current}}<|im_end|>\n<|im_start|>assistant".format(
            system_prompt=SYSTEM_PROMPT
        ),  # input_current_text_template
        "\n",  # decoder_input_output_separator
        ["<|im_start|>", "<|im_end|>"],  # special_tokens_to_keep
    )
def set_mmt_preset():
    """Return the preset settings for the mBART-50 one-to-many MMT model.

    Returns a ``(model_name_or_path, input_template, output_template,
    tokenizer_kwargs)`` tuple; the tokenizer kwargs pin the EN->FR direction.
    """
    lang_pair_kwargs = '{\n\t"src_lang": "en_XX",\n\t"tgt_lang": "fr_XX"\n}'
    template = "{context} {current}"
    return (
        "facebook/mbart-large-50-one-to-many-mmt",  # model_name_or_path
        template,  # input_template
        template,  # output_template
        lang_pair_kwargs,  # tokenizer_kwargs
    )
def set_towerinstruct_preset():
    """Return the preset settings for the TowerInstruct translation model.

    The prompts ask for an EN->FR translation; the contextual variant adds a
    ``Context:`` line and instructs the model to use it.
    """
    contextual_prompt = "\n".join(
        [
            "<|im_start|>user",
            "Source: {current}",
            "Context: {context}",
            "Translate the above text into French. Use the context to guide your answer.",
            "Target:<|im_end|>",
            "<|im_start|>assistant",
        ]
    )
    contextless_prompt = "\n".join(
        [
            "<|im_start|>user",
            "Source: {current}",
            "Translate the above text into French.",
            "Target:<|im_end|>",
            "<|im_start|>assistant",
        ]
    )
    return (
        "Unbabel/TowerInstruct-7B-v0.1",  # model_name_or_path
        contextual_prompt,  # input_template
        contextless_prompt,  # input_current_text_template
        "\n",  # decoder_input_output_separator
        ["<|im_start|>", "<|im_end|>"],  # special_tokens_to_keep
    )
def set_gemma_preset():
    """Return the preset settings for the Gemma 2B instruction-tuned model."""
    contextual_prompt = "\n".join(
        [
            "<start_of_turn>user",
            "{context}",
            "{current}<end_of_turn>",
            "<start_of_turn>model",
        ]
    )
    contextless_prompt = "\n".join(
        [
            "<start_of_turn>user",
            "{current}<end_of_turn>",
            "<start_of_turn>model",
        ]
    )
    return (
        "google/gemma-2b-it",  # model_name_or_path
        contextual_prompt,  # input_template
        contextless_prompt,  # input_current_text_template
        "\n",  # decoder_input_output_separator
        ["<start_of_turn>", "<end_of_turn>"],  # special_tokens_to_keep
    )
def set_mistral_instruct_preset():
    """Return the preset settings for the Mistral-7B-Instruct chat model.

    Fix: the original tuple had no commas between its elements, so Python's
    implicit string-literal concatenation collapsed the four values into one
    long string instead of the 4-tuple every sibling preset returns.
    """
    return (
        "mistralai/Mistral-7B-Instruct-v0.2",  # model_name_or_path
        "[INST]{context}\n{current}[/INST]",  # input_template
        "[INST]{current}[/INST]",  # input_current_text_template
        "\n",  # decoder_input_output_separator
    )
def update_code_snippets_fn(
input_current_text: str,
input_context_text: str,
output_current_text: str,
output_context_text: str,
model_name_or_path: str,
attribution_method: str,
attributed_fn: str | None,
context_sensitivity_metric: str,
context_sensitivity_std_threshold: float,
context_sensitivity_topk: int,
attribution_std_threshold: float,
attribution_topk: int,
input_template: str,
output_template: str,
contextless_input_template: str,
contextless_output_template: str,
special_tokens_to_keep: str | list[str] | None,
decoder_input_output_separator: str,
model_kwargs: str,
tokenizer_kwargs: str,
generation_kwargs: str,
attribution_kwargs: str,
) -> tuple[str, str]:
def get_kwargs_str(kwargs: str, name: str, pad: str = " " * 4) -> str:
kwargs_dict = json.loads(kwargs)
return nl + pad + name + '=' + str(kwargs_dict) + ',' if kwargs_dict else ''
nl = "\n"
tq = "\"\"\""
# Python
python = f"""#!pip install inseq
import inseq
from inseq.commands.attribute_context import attribute_context_with_model
inseq_model = inseq.load_model(
"{model_name_or_path}",
"{attribution_method}",{get_kwargs_str(model_kwargs, "model_kwargs")}{get_kwargs_str(tokenizer_kwargs, "tokenizer_kwargs")}
)
pecore_args = AttributeContextArgs(
save_path="pecore_output.json",
viz_path="pecore_output.html",
model_name_or_path="{model_name_or_path}",
attribution_method="{attribution_method}",
attributed_fn="{attributed_fn}",
context_sensitivity_metric="{context_sensitivity_metric}",
special_tokens_to_keep={special_tokens_to_keep},
context_sensitivity_std_threshold={context_sensitivity_std_threshold},
attribution_std_threshold={attribution_std_threshold},
input_current_text=\"\"\"{input_current_text}\"\"\",
input_template=\"\"\"{input_template}\"\"\",
output_template="{output_template}",
contextless_input_current_text=\"\"\"{contextless_input_template}\"\"\",
contextless_output_current_text=\"\"\"{contextless_output_template}\"\"\",
context_sensitivity_topk={context_sensitivity_topk if context_sensitivity_topk > 0 else None},
attribution_topk={attribution_topk if attribution_topk > 0 else None},
input_context_text={tq + input_context_text + tq if input_context_text else None},
output_context_text={tq + output_context_text + tq if output_context_text else None},
output_current_text={tq + output_current_text + tq if output_current_text else None},
decoder_input_output_separator={tq + decoder_input_output_separator + tq if decoder_input_output_separator else None},{get_kwargs_str(model_kwargs, "model_kwargs")}{get_kwargs_str(tokenizer_kwargs, "tokenizer_kwargs")}{get_kwargs_str(generation_kwargs, "generation_kwargs")}{get_kwargs_str(attribution_kwargs, "attribution_kwargs")}
)
out = attribute_context_with_model(pecore_args, loaded_model)"""
# Bash
bash = f"""pip install inseq
inseq attribute-context \\
--save-path pecore_output.json \\
--viz-path pecore_output.html \\
--model-name-or-path "{model_name_or_path}" \\
--attribution-method "{attribution_method}" \\
--attributed-fn "{attributed_fn}" \\
--context-sensitivity-metric "{context_sensitivity_metric}" \\
--special-tokens-to-keep {" ".join(special_tokens_to_keep)} \\
--context-sensitivity-std-threshold {context_sensitivity_std_threshold} \\
--attribution-std-threshold {attribution_std_threshold} \\
--input-current-text "{input_current_text}" \\
--input-template "{input_template}" \\
--output-template "{output_template}" \\
--contextless-input-current-text "{contextless_input_template}" \\
--contextless-output-current-text "{contextless_output_template}" \\
--context-sensitivity-topk {context_sensitivity_topk if context_sensitivity_topk > 0 else None} \\
--attribution-topk {attribution_topk if attribution_topk > 0 else None} \\
--input-context-text "{input_context_text}" \\
--output-context-text "{output_context_text}" \\
--output-current-text "{output_current_text}" \\
--decoder-input-output-separator "{decoder_input_output_separator}" \\
--model-kwargs "{str(model_kwargs).replace(nl, "")}" \\
--tokenizer-kwargs "{str(tokenizer_kwargs).replace(nl, "")} \\
--generation-kwargs "{str(generation_kwargs).replace(nl, "")}" \\
--attribution-kwargs "{str(attribution_kwargs).replace(nl, "")}"
"""
return python, bash