---
library_name: peft
base_model: google/gemma-7b-it
datasets:
- nmarafo/truthful_qa_TrueFalse-Feedback
language:
- en
- es
license: other
license_name: gemma-terms-of-use
license_link: https://ai.google.dev/gemma/terms
---
# Model Card for Gemma-7B-it-4bit-TrueFalse-Feedback
This is a PEFT adapter for `google/gemma-7b-it` that returns True or False depending on whether a student's answer (`student_answer`) is correct, judging it against the question (`question`) and a given reference answer (`best_answer`), together with a brief comment explaining the rationale.
The prompt has the following structure:
```
<start_of_turn>user
Analyze the question, the expected answer, and the student's response. Determine if the student's answer is conceptually correct in relation to the expected answer, regardless of the exact wording. An answer will be considered correct if it accurately identifies the key information requested in the question, even if expressed differently. Return True if the student's answer is correct or False otherwise. Add a brief comment explaining the rationale behind the answer being correct or incorrect.
Question: {question}
Expected Answer: {best_answer}
Student Answer: {student_answer}<end_of_turn>
<start_of_turn>model
```
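For instance, with the placeholders filled in (the question and answers below are made up for illustration), the rendered prompt would look like:
```
<start_of_turn>user
Analyze the question, the expected answer, and the student's response. [...] Add a brief comment explaining the rationale behind the answer being correct or incorrect.
Question: What is the boiling point of water at sea level?
Expected Answer: 100 °C
Student Answer: Water boils at 212 °F at sea level.<end_of_turn>
<start_of_turn>model
```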
## How to Get Started with the Model
In Google Colab:
```
# Install dependencies (Gemma support may require recent library versions)
!pip install -q -U bitsandbytes
!pip install -q -U git+https://github.com/huggingface/transformers.git
!pip install -q -U git+https://github.com/huggingface/peft.git
!pip install -q -U git+https://github.com/huggingface/accelerate.git
!pip install -q -U gradio

from transformers import AutoTokenizer, BitsAndBytesConfig
from peft import AutoPeftModelForCausalLM
import torch
import re
# Base model and adapter identifiers
model_id = "google/gemma-7b-it"
adapter = "nmarafo/Gemma-7B-it-4bit-TrueFalse-Feedback"
# Quantize the base model to 4-bit (NF4) so it fits on a single Colab GPU
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
# AutoPeftModelForCausalLM loads the base model named in the adapter config and attaches the adapter weights
model = AutoPeftModelForCausalLM.from_pretrained(adapter, quantization_config=bnb_config, device_map={"": 0})
def predict(question, best_answer, student_answer, language):
    if language == "English":
        system_message = "Analyze the question, the expected answer, and the student's response. Determine if the student's answer is conceptually correct in relation to the expected answer, regardless of the exact wording. Return True if the student's answer is correct or False otherwise. Add a brief comment explaining the rationale behind the answer being correct or incorrect."
    else:  # Any other option is assumed to be Spanish
        system_message = "Analiza la pregunta, la respuesta esperada y la respuesta del estudiante. Determina si la respuesta del estudiante es conceptualmente correcta en relación con la respuesta esperada, independientemente de la redacción exacta. Devuelve Verdadero si la respuesta del estudiante es correcta o Falso en caso contrario. Añade un breve comentario explicando el razonamiento detrás de la corrección o incorrección de la respuesta."
    prompt = f"{system_message}\n\nQuestion: {question}\nExpected Answer: {best_answer}\nStudent Answer: {student_answer}"
    prompt_template = f"<bos><start_of_turn>user\n{prompt}<end_of_turn>\n<start_of_turn>model"
    # Tokenize, keeping the attention_mask; the template already contains <bos>,
    # so skip the tokenizer's automatic special tokens to avoid a duplicate BOS
    encoding = tokenizer(prompt_template, return_tensors='pt', padding=True, truncation=True, max_length=256, add_special_tokens=False)
    input_ids = encoding['input_ids'].cuda()
    attention_mask = encoding['attention_mask'].cuda()
    output = model.generate(input_ids, attention_mask=attention_mask,
                            temperature=0.5, do_sample=True, top_p=0.49,
                            top_k=40, max_new_tokens=256, pad_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated tokens so the prompt is not echoed back
    response = tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)
    return response
import gradio as gr

iface = gr.Interface(
    fn=predict,
    inputs=[
        gr.Textbox(lines=2, placeholder="Question"),
        gr.Textbox(lines=2, placeholder="Best Answer"),
        gr.Textbox(lines=2, placeholder="Student Answer"),
        gr.Radio(choices=["English", "Español"], label="Language")
    ],
    outputs=gr.Textbox(label="Model Response")
)
iface.launch(share=True, debug=True)
```
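The decoded reply is free-form text, so you may want to extract the True/False verdict programmatically. Below is a minimal sketch using the `re` module imported above; the `parse_verdict` helper and its regex are illustrative assumptions rather than part of the adapter's API, and the example inputs are made up:
```
def parse_verdict(response):
    # Match an explicit verdict token in either language (True/Verdadero vs. False/Falso)
    match = re.search(r"\b(True|Verdadero)\b|\b(False|Falso)\b", response, re.IGNORECASE)
    if match is None:
        return None  # no explicit verdict found in the reply
    return match.group(1) is not None  # group 1 matched => True, group 2 => False

response = predict(
    "What is the capital of France?",  # illustrative inputs
    "Paris",
    "The capital of France is Paris.",
    "English",
)
print(parse_verdict(response), "-", response)
```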
### Framework versions
- PEFT 0.8.2