Update README.md
README.md CHANGED
@@ -16,12 +16,13 @@ license_link: https://ai.google.dev/gemma/terms
Previous README content (removed lines are prefixed with "-"):

This is an adapter prepared to return True or False depending on whether the student's answer ("student_answer") is correct based on the question ("question") and comparing it with a given answer ("best_answer").
The prompt has the following structure:
```
- <s>[INST]
- Determine if the student's answer is correct
-
- Question: {question}
- Expected Answer: {best_answer}
- Student Answer: {student_answer}[/INST]"
```
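For illustration only, a minimal sketch of how this removed template was assembled by the old code; the sample question and answers below are invented:

```
# Illustrative values; not part of the original card
question = "What is the capital of France?"
best_answer = "Paris"
student_answer = "The capital city is Paris."

system_message = "Determine if the student's answer is correct"
prompt = f"{system_message}\n\nQuestion: {question}\nBest Answer: {best_answer}\nStudent Answer: {student_answer}"
prompt_template = f"<s>[INST]{prompt}[/INST]"
print(prompt_template)
```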
@@ -29,64 +30,72 @@ Student Answer: {student_answer}[/INST]"
In Google Colab:
```
- !pip install -q -U
- !pip install
- !pip install

from peft import AutoPeftModelForCausalLM
- from rich import print
- from transformers import GenerationConfig, AutoTokenizer
import torch

- adapter = "nmarafo/Mistral-7B-Instruct-v0.2-TrueFalse-Feedback-GPTQ"

- tokenizer.pad_token = tokenizer.eos_token

- prompt = f"{system_message}\n\nQuestion: {question}\nBest Answer: {best_answer}\nStudent Answer: {student_answer}"
- prompt_template = f"<s>[INST]{prompt}[/INST]"

- encoding = tokenizer(prompt_template, return_tensors='pt', padding=True, truncation=True, max_length=
input_ids = encoding['input_ids'].cuda()
attention_mask = encoding['attention_mask'].cuda()

- output = model.generate(input_ids, attention_mask=attention_mask,
-                         temperature=
-                         top_k=
response = tokenizer.decode(output[0], skip_special_tokens=True)

- # To perform inference on the test dataset example, load the model from the checkpoint
- persisted_model = AutoPeftModelForCausalLM.from_pretrained(
-     adapter,
-     low_cpu_mem_usage=True,
-     return_dict=True,
-     torch_dtype=torch.float16,
-     device_map="cuda")
- # Some gen config knobs
- generation_config = GenerationConfig(
-     penalty_alpha=0.6,
-     do_sample=True,
-     top_k=5,
-     temperature=0.5,
-     repetition_penalty=1.2,
-     max_new_tokens=512
- )
```
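The removed snippet loads the checkpoint and builds a GenerationConfig but never shows the final call; a minimal sketch of how the two would have been combined, assuming the tokenizer and prompt_template defined earlier in that snippet, could look like this:

```
# Sketch only: reuses persisted_model, generation_config, tokenizer and
# prompt_template from the removed snippet above
inputs = tokenizer(prompt_template, return_tensors="pt").to("cuda")
with torch.no_grad():
    output = persisted_model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        generation_config=generation_config,
    )
print(tokenizer.decode(output[0], skip_special_tokens=True))
```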
### Framework versions
Updated README content (added lines are prefixed with "+"):

This is an adapter prepared to return True or False depending on whether the student's answer ("student_answer") is correct based on the question ("question") and comparing it with a given answer ("best_answer").
The prompt has the following structure:
```
+ <start_of_turn>user\n
+ Analyze the question, the expected answer, and the student's response. Determine if the student's answer is conceptually correct in relation to the expected answer, regardless of the exact wording. An answer will be considered correct if it accurately identifies the key information requested in the question, even if expressed differently. Return True if the student's answer is correct or False otherwise. Add a brief comment explaining the rationale behind the answer being correct or incorrect.
+
+ Question: {question}
+ Expected Answer: {best_answer}
+ Student Answer: {student_answer}<end_of_turn>\n
+ <start_of_turn>model
```
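As a quick, hypothetical example (with a shortened system message and made-up answers), the template above is assembled in Python like this:

```
# Hypothetical values; use the full system message from the template above in practice
question = "What gas do plants absorb during photosynthesis?"
best_answer = "Carbon dioxide"
student_answer = "They take in CO2 from the air."

system_message = "Return True if the student's answer is correct or False otherwise."
prompt = f"{system_message}\n\nQuestion: {question}\nExpected Answer: {best_answer}\nStudent Answer: {student_answer}"
prompt_template = f"<start_of_turn>user\n{prompt}<end_of_turn>\n<start_of_turn>model"
print(prompt_template)
```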
In Google Colab:
```
+ !pip install -q -U bitsandbytes
+ !pip install -q -U git+https://github.com/huggingface/transformers.git
+ !pip install -q -U git+https://github.com/huggingface/peft.git
+ !pip install -q -U git+https://github.com/huggingface/accelerate.git
+ !pip install -q -U gradio

+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, GemmaTokenizer
from peft import AutoPeftModelForCausalLM
import torch
+ import re
+
+ # Load the model and the tokenizer
+ model_id = "google/gemma-7b-it"
+ adapter = "nmarafo/Gemma-7B-it-4bit-TrueFalse-Feedback"
+
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_compute_dtype=torch.bfloat16
+ )

+ tokenizer = AutoTokenizer.from_pretrained(model_id)

+ model = AutoPeftModelForCausalLM.from_pretrained(adapter, quantization_config=bnb_config, device_map={"": 0})

+ def predict(question, best_answer, student_answer, language, temperature, top_p, top_k):
+     if language == "English":
+         system_message = "Analyze the question, the expected answer, and the student's response. Determine if the student's answer is conceptually correct in relation to the expected answer, regardless of the exact wording. Return True if the student's answer is correct or False otherwise. Add a brief comment explaining the rationale behind the answer being correct or incorrect."
+     else:  # Assume any other option is Spanish
+         system_message = "Analiza la pregunta, la respuesta esperada y la respuesta del estudiante. Determina si la respuesta del estudiante es conceptualmente correcta en relación con la respuesta esperada, independientemente de la redacción exacta. Devuelve Verdadero si la respuesta del estudiante es correcta o Falso en caso contrario. Añade un breve comentario explicando el razonamiento detrás de la corrección o incorrección de la respuesta."
+
+     prompt = f"{system_message}\n\nQuestion: {question}\nExpected Answer: {best_answer}\nStudent Answer: {student_answer}"
+     prompt_template = f"<start_of_turn>user\n{prompt}<end_of_turn>\n<start_of_turn>model"

+     encoding = tokenizer(prompt_template, return_tensors='pt', padding=True, truncation=True, max_length=256)
    input_ids = encoding['input_ids'].cuda()
    attention_mask = encoding['attention_mask'].cuda()

+     output = model.generate(input_ids, attention_mask=attention_mask,
+                             temperature=temperature, do_sample=True, top_p=top_p,
+                             top_k=top_k, max_new_tokens=256, pad_token_id=tokenizer.eos_token_id)
    response = tokenizer.decode(output[0], skip_special_tokens=True)
+     # Strip any echoed [INST]...[/INST] wrapper, keeping only the verdict that follows it
+     pattern = r"\[INST\].*?\[\/INST\]\s*(True|False)"
+     cleaned_response = re.sub(pattern, r"\1", response, flags=re.DOTALL)
+
+     return cleaned_response
+
+ import gradio as gr
+
+ iface = gr.Interface(
+     fn=predict,
+     inputs=[
+         gr.Textbox(lines=2, placeholder="Pregunta"),
+         gr.Textbox(lines=2, placeholder="Mejor Respuesta"),
+         gr.Textbox(lines=2, placeholder="Respuesta del Estudiante"),
+         gr.Radio(choices=["English", "Español"], label="Idioma"),
+         gr.Slider(minimum=0.1, maximum=1.0, step=0.1, value=0.5, label="Temperature"),
+         gr.Slider(minimum=0.1, maximum=1.0, step=0.01, value=0.49, label="Top P"),
+         gr.Slider(minimum=0, maximum=100, step=1, value=40, label="Top K")
+     ],
+     outputs=gr.Textbox(label="Respuesta del Modelo")
+ )
+ iface.launch(share=True, debug=True)
```
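A minimal usage sketch, assuming the cell above has already been run; the question and answers are invented, and predict can also be called directly without launching the Gradio app:

```
# Hypothetical inputs; any question/answer triple works the same way
result = predict(
    question="What is 2 + 2?",
    best_answer="4",
    student_answer="The result is four.",
    language="English",
    temperature=0.5,
    top_p=0.9,
    top_k=40,
)
print(result)  # expected to contain True or False plus a short comment
```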
### Framework versions