nmarafo committed
Commit 6a4a949 · 1 Parent(s): 04f780e

Create app.py

Files changed (1)
  1. app.py +36 -0
app.py ADDED
@@ -0,0 +1,36 @@
+ import streamlit as st
+ from transformers import AutoTokenizer
+ from peft import AutoPeftModelForCausalLM
+ import torch
+
+ # Model and tokenizer configuration
+ model_id = "TheBloke/Mistral-7B-Instruct-v0.2-GPTQ"
+ adapter = "nmarafo/Mistral-7B-Instruct-v0.2-TrueFalse-Feedback-GPTQ"
+ tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, return_token_type_ids=False)
+ tokenizer.pad_token = tokenizer.eos_token
+ model = AutoPeftModelForCausalLM.from_pretrained(adapter).cuda()  # load the adapter repo; the GPTQ base model is resolved from its adapter config
+
+ def generate_response(question, best_answer, student_answer):
+     system_message = "Analyze the question, the expected answer, and the student's response. Determine if the student's answer is conceptually correct in relation to the expected answer, regardless of the exact wording. Return True if the student's answer is correct or False otherwise. Add a brief comment explaining the rationale behind the answer being correct or incorrect."
+     prompt = f"{system_message}\n\nQuestion: {question}\nExpected Answer: {best_answer}\nStudent Answer: {student_answer}"
+     prompt_template = f"<s>[INST] {prompt} [/INST]"
+     input_ids = tokenizer(prompt_template, return_tensors="pt").input_ids.cuda()
+     output = model.generate(input_ids, temperature=0.7, do_sample=True, top_p=0.95, top_k=40, max_new_tokens=512)
+     response = tokenizer.decode(output[0][input_ids.shape[1]:], skip_special_tokens=True)  # decode only the newly generated tokens, not the prompt
+     return response
+
+ # Build the Streamlit user interface
+ st.title("Evaluador de Respuestas con GPTQ")
+
+ # Input form
+ with st.form("evaluation_form"):
+     question = st.text_input("Pregunta", "")
+     best_answer = st.text_input("Mejor Respuesta", "")
+     student_answer = st.text_input("Respuesta del Estudiante", "")
+     # Submit button for the form
+     submitted = st.form_submit_button("Evaluar")
+
+ if submitted:
+     response = generate_response(question, best_answer, student_answer)
+     st.write("Respuesta del Modelo:", response)
+
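For context, the app itself is started with "streamlit run app.py". The sketch below is a hypothetical smoke test for the scoring function outside the Streamlit UI: importing generate_response from app.py triggers the model load, so it assumes a CUDA GPU with enough memory for the GPTQ base model plus the adapter, and the question and answers are made-up placeholders rather than examples from the repository.

# Hypothetical smoke test (not part of the commit): call generate_response directly.
# Importing app also executes its Streamlit calls, which is harmless outside "streamlit run".
from app import generate_response

question = "What causes the seasons on Earth?"
best_answer = "The tilt of Earth's rotational axis relative to its orbital plane."
student_answer = "Because the Earth is closer to the Sun in summer."

print(generate_response(question, best_answer, student_answer))
# The prompt asks the model to return True/False followed by a brief rationale.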