"""Model presets and code-snippet generation utilities for the PECoRe demo."""

import json

SYSTEM_PROMPT = "You are a helpful assistant that provides concise and accurate answers."


def set_cora_preset():
    return (
        "gsarti/cora_mgen",  # model_name_or_path
        "<Q>:{current} <P>:{context}",  # input_template (question + passage)
        "<Q>:{current}",  # contextless_input_template (question only)
    )


def set_default_preset():
    return (
        "gpt2",  # model_name_or_path
        "{current} {context}",  # input_template
        "{current}",  # output_template
        "{current}",  # contextless_input_template
        "{current}",  # contextless_output_template
        [],  # special_tokens_to_keep
        "",  # decoder_input_output_separator
        "{}",  # model_kwargs (JSON string)
        "{}",  # tokenizer_kwargs (JSON string)
        "{}",  # generation_kwargs (JSON string)
        "{}",  # attribution_kwargs (JSON string)
    )


def set_zephyr_preset():
    # {context}/{current} are brace-escaped so that .format() fills only the
    # system prompt and leaves the two placeholders intact for later use.
    return (
        "stabilityai/stablelm-2-zephyr-1_6b",
        "<|system|>{system_prompt}<|endoftext|>\n<|user|>\n{{context}}\n\n{{current}}<|endoftext|>\n<|assistant|>".format(system_prompt=SYSTEM_PROMPT),
        "<|system|>{system_prompt}<|endoftext|>\n<|user|>\n{{current}}<|endoftext|>\n<|assistant|>".format(system_prompt=SYSTEM_PROMPT),
        "\n",  # decoder_input_output_separator
        ["<|im_start|>", "<|im_end|>", "<|endoftext|>"],  # special_tokens_to_keep
    )


def set_chatml_preset():
    return (
        "Qwen/Qwen1.5-0.5B-Chat",
        "<|im_start|>system\n{system_prompt}<|im_end|>\n<|im_start|>user\n{{context}}\n\n{{current}}<|im_end|>\n<|im_start|>assistant".format(system_prompt=SYSTEM_PROMPT),
        "<|im_start|>system\n{system_prompt}<|im_end|>\n<|im_start|>user\n{{current}}<|im_end|>\n<|im_start|>assistant".format(system_prompt=SYSTEM_PROMPT),
        "\n",  # decoder_input_output_separator
        ["<|im_start|>", "<|im_end|>"],  # special_tokens_to_keep
    )


def set_mmt_preset():
    return (
        "facebook/mbart-large-50-one-to-many-mmt",  # model_name_or_path
        "{context} {current}",  # input_template
        "{context} {current}",  # output_template
        '{\n\t"src_lang": "en_XX",\n\t"tgt_lang": "fr_XX"\n}',  # tokenizer_kwargs (JSON string)
    )


def set_towerinstruct_preset():
    return (
        "Unbabel/TowerInstruct-7B-v0.1",
        "<|im_start|>user\nSource: {current}\nContext: {context}\nTranslate the above text into French. Use the context to guide your answer.\nTarget:<|im_end|>\n<|im_start|>assistant",
        "<|im_start|>user\nSource: {current}\nTranslate the above text into French.\nTarget:<|im_end|>\n<|im_start|>assistant",
        "\n",  # decoder_input_output_separator
        ["<|im_start|>", "<|im_end|>"],  # special_tokens_to_keep
    )


def set_gemma_preset():
    return (
        "google/gemma-2b-it",  # model_name_or_path
        "<start_of_turn>user\n{context}\n{current}<end_of_turn>\n<start_of_turn>model",  # input_template
        "<start_of_turn>user\n{current}<end_of_turn>\n<start_of_turn>model",  # contextless_input_template
        "\n",  # decoder_input_output_separator
        ["<start_of_turn>", "<end_of_turn>"],  # special_tokens_to_keep
    )


def set_mistral_instruct_preset():
    return (
        "mistralai/Mistral-7B-Instruct-v0.2",  # model_name_or_path
        "[INST]{context}\n{current}[/INST]",  # input_template
        "[INST]{current}[/INST]",  # contextless_input_template
        "\n",  # decoder_input_output_separator
    )
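
# Note on the preset contract (an assumption inferred from the parameter names
# of update_code_snippets_fn below; the unpacking shown is illustrative):
#
#   (model_name_or_path, input_template, contextless_input_template,
#    decoder_input_output_separator, special_tokens_to_keep) = set_zephyr_preset()
#
# The {context} and {current} placeholders are deliberately left unfilled: they
# are substituted with the user-provided context and query at attribution time,
# e.g. "<Q>:{current} <P>:{context}".format(current=question, context=passage)
# for the CORA preset (hypothetical usage, shown for clarity only).

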
def update_code_snippets_fn(
    input_current_text: str,
    input_context_text: str,
    output_current_text: str,
    output_context_text: str,
    model_name_or_path: str,
    attribution_method: str,
    attributed_fn: str | None,
    context_sensitivity_metric: str,
    context_sensitivity_std_threshold: float,
    context_sensitivity_topk: int,
    attribution_std_threshold: float,
    attribution_topk: int,
    input_template: str,
    output_template: str,
    contextless_input_template: str,
    contextless_output_template: str,
    special_tokens_to_keep: str | list[str] | None,
    decoder_input_output_separator: str,
    model_kwargs: str,
    tokenizer_kwargs: str,
    generation_kwargs: str,
    attribution_kwargs: str,
) -> tuple[str, str]:
    """Return matching Python and shell snippets reproducing the current demo settings."""
    nl = "\n"
    tq = '"""'

    def get_kwargs_str(kwargs: str, name: str, pad: str = " " * 4) -> str:
        # Render a JSON kwargs string as an indented `name={...},` argument line,
        # or as an empty string when the parsed dict is empty.
        kwargs_dict = json.loads(kwargs)
        return (nl + pad + name + "=" + str(kwargs_dict) + ",") if kwargs_dict else ""

    python = f"""#!pip install inseq
import inseq
from inseq.commands.attribute_context import AttributeContextArgs, attribute_context_with_model

inseq_model = inseq.load_model(
    "{model_name_or_path}",
    "{attribution_method}",{get_kwargs_str(model_kwargs, 'model_kwargs')}{get_kwargs_str(tokenizer_kwargs, 'tokenizer_kwargs')}
)

pecore_args = AttributeContextArgs(
    save_path="pecore_output.json",
    viz_path="pecore_output.html",
    model_name_or_path="{model_name_or_path}",
    attribution_method="{attribution_method}",
    attributed_fn="{attributed_fn}",
    context_sensitivity_metric="{context_sensitivity_metric}",
    special_tokens_to_keep={special_tokens_to_keep},
    context_sensitivity_std_threshold={context_sensitivity_std_threshold},
    attribution_std_threshold={attribution_std_threshold},
    input_current_text={tq}{input_current_text}{tq},
    input_template={tq}{input_template}{tq},
    output_template={tq}{output_template}{tq},
    contextless_input_current_text={tq}{contextless_input_template}{tq},
    contextless_output_current_text={tq}{contextless_output_template}{tq},
    context_sensitivity_topk={context_sensitivity_topk if context_sensitivity_topk > 0 else None},
    attribution_topk={attribution_topk if attribution_topk > 0 else None},
    input_context_text={tq + input_context_text + tq if input_context_text else None},
    output_context_text={tq + output_context_text + tq if output_context_text else None},
    output_current_text={tq + output_current_text + tq if output_current_text else None},
    decoder_input_output_separator={tq + decoder_input_output_separator + tq if decoder_input_output_separator else None},{get_kwargs_str(model_kwargs, 'model_kwargs')}{get_kwargs_str(tokenizer_kwargs, 'tokenizer_kwargs')}{get_kwargs_str(generation_kwargs, 'generation_kwargs')}{get_kwargs_str(attribution_kwargs, 'attribution_kwargs')}
)
out = attribute_context_with_model(pecore_args, inseq_model)"""

    bash = f"""pip install inseq
inseq attribute-context \\
    --save-path pecore_output.json \\
    --viz-path pecore_output.html \\
    --model-name-or-path "{model_name_or_path}" \\
    --attribution-method "{attribution_method}" \\
    --attributed-fn "{attributed_fn}" \\
    --context-sensitivity-metric "{context_sensitivity_metric}" \\
    --special-tokens-to-keep {' '.join(special_tokens_to_keep or [])} \\
    --context-sensitivity-std-threshold {context_sensitivity_std_threshold} \\
    --attribution-std-threshold {attribution_std_threshold} \\
    --input-current-text "{input_current_text}" \\
    --input-template "{input_template}" \\
    --output-template "{output_template}" \\
    --contextless-input-current-text "{contextless_input_template}" \\
    --contextless-output-current-text "{contextless_output_template}" \\
    --context-sensitivity-topk {context_sensitivity_topk if context_sensitivity_topk > 0 else None} \\
    --attribution-topk {attribution_topk if attribution_topk > 0 else None} \\
    --input-context-text "{input_context_text}" \\
    --output-context-text "{output_context_text}" \\
    --output-current-text "{output_current_text}" \\
    --decoder-input-output-separator "{decoder_input_output_separator}" \\
    --model-kwargs "{str(model_kwargs).replace(nl, '')}" \\
    --tokenizer-kwargs "{str(tokenizer_kwargs).replace(nl, '')}" \\
    --generation-kwargs "{str(generation_kwargs).replace(nl, '')}" \\
    --attribution-kwargs "{str(attribution_kwargs).replace(nl, '')}"
"""
    return python, bash
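

if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the demo wiring): generate the
    # two snippets for a contextual QA setup and print them. The method, metric,
    # and attributed-fn names below are common inseq options chosen for
    # illustration; they are assumptions, not values taken from this module.
    python_snippet, bash_snippet = update_code_snippets_fn(
        input_current_text="When was the theory proposed?",
        input_context_text="The theory of evolution was proposed by Darwin in 1859.",
        output_current_text="",
        output_context_text="",
        model_name_or_path="gpt2",
        attribution_method="saliency",
        attributed_fn="probability",
        context_sensitivity_metric="kl_divergence",
        context_sensitivity_std_threshold=1.0,
        context_sensitivity_topk=0,
        attribution_std_threshold=1.0,
        attribution_topk=0,
        input_template="{current} {context}",
        output_template="{current}",
        contextless_input_template="{current}",
        contextless_output_template="{current}",
        special_tokens_to_keep=[],
        decoder_input_output_separator="\n",
        model_kwargs="{}",
        tokenizer_kwargs="{}",
        generation_kwargs="{}",
        attribution_kwargs="{}",
    )
    print(python_snippet)
    print(bash_snippet)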