analist committed on
Commit 67e68fa · verified · 1 Parent(s): 4085a4c

Update app.py

Files changed (1):
  1. app.py +47 -81

app.py CHANGED
@@ -1,40 +1,20 @@
  import streamlit as st
- from ctransformers import AutoModelForCausalLM, AutoTokenizer
+ from transformers import AutoModelForCausalLM, AutoTokenizer
  import torch

- # Streamlit page configuration
- st.set_page_config(page_title="Math Assistant", page_icon="🔢", layout="wide")
-
- @st.cache_resource
- def load_model():
-     """Load the model and tokenizer (cached by Streamlit)"""
-     model_name = "analist/deepseek-math-gguf"  # Replace with your model name
-
-     model = AutoModelForCausalLM.from_pretrained(
-         "analist/deepseek-math-gguf", model_file="model.gguf"
-     )
-     tokenizer = AutoTokenizer.from_pretrained(model)
-     return model, tokenizer
-
- def generate_response(prompt, model, tokenizer):
-     """Generate a response from the prompt"""
-     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
-
-     with torch.no_grad():
-         outputs = model.generate(
-             **inputs,
-             max_new_tokens=1200,
-             temperature=0.7,
-             do_sample=True,
-             top_p=0.95,
+ class MathTutor:
+     def __init__(self):
+         self.model_id = "your-username/deepseek-math-tutor-cpu"
+         self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
+         self.model = AutoModelForCausalLM.from_pretrained(
+             self.model_id,
+             torch_dtype=torch.float32,
+             low_cpu_mem_usage=True,
+             device_map="cpu"
          )
-
-     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-     return response.split("### Response:")[-1].strip()
-
- def format_prompt(question):
-     """Format the prompt the same way as during training"""
-     return f"""Below is an instruction that describes a task, paired with an input that provides further context.
+
+     def get_response(self, question):
+         prompt = f"""Below is an instruction that describes a task, paired with an input that provides further context.
  Write a response that appropriately completes the request.
  Before answering, think carefully about the question and create a step-by-step chain of thoughts to ensure a logical and accurate response.
  Your goal is to teach maths to a beginner, so make it friendly and accessible. Break down your chain of thought so that he or she can understand.
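Note that this hunk swaps the ctransformers GGUF loader for a plain transformers CPU load, but it also drops the @st.cache_resource caching that the old load_model() had, so MathTutor.__init__ now reloads the weights on every Streamlit rerun. A minimal sketch of reintroducing that caching around the new loader (load_tutor_model is a hypothetical helper name, not part of this commit):

import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

@st.cache_resource
def load_tutor_model(model_id: str):
    # Runs once per process; Streamlit hands back the cached objects on later reruns.
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.float32,  # full precision on CPU, as in the commit
        low_cpu_mem_usage=True,
        device_map="cpu",
    )
    return tokenizer, model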
@@ -46,56 +26,42 @@ Please answer the following questions.
  ### Question:
  {question}

- ### Response:"""
+ ### Response:
+ <think>"""
+
+         inputs = self.tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024)
+         outputs = self.model.generate(
+             **inputs,
+             max_new_tokens=1200,
+             temperature=0.7,
+             do_sample=True
+         )
+         return self.tokenizer.decode(outputs[0], skip_special_tokens=True)

  def main():
-     # Application title
-     st.title("🔢 Math Assistant")
-     st.markdown("---")
-
-     # Load the model
-     with st.spinner("Loading the model..."):
-         model, tokenizer = load_model()
-
-     # Initialize the message history in the session state
-     if "messages" not in st.session_state:
-         st.session_state.messages = []
-
-     # Display the message history
-     for message in st.session_state.messages:
-         with st.chat_message(message["role"]):
-             st.markdown(message["content"])
-
-     # User input area
-     if question := st.chat_input("Ask your math question..."):
-         # Show the user's question
-         with st.chat_message("user"):
-             st.markdown(question)
-         st.session_state.messages.append({"role": "user", "content": question})
-
-         # Generate and display the response
-         with st.chat_message("assistant"):
-             with st.spinner("Thinking..."):
-                 prompt = format_prompt(question)
-                 response = generate_response(prompt, model, tokenizer)
-                 response = response.replace('<think>', '')
-                 st.markdown(response)
-         st.session_state.messages.append({"role": "assistant", "content": response})
-
-     # Button to clear the history
-     if st.sidebar.button("Clear history"):
-         st.session_state.messages = []
-         st.rerun()
-
-     # Sidebar information
-     with st.sidebar:
-         st.markdown("### About")
-         st.markdown("""
-         This assistant uses a DeepSeek model specially trained to:
-         - Explain mathematical concepts
-         - Solve problems step by step
-         - Provide clear, beginner-friendly explanations
-         """)
+     st.title("🧮 Friendly Math Tutor")
+     st.write("Ask me any math question! I'll help you understand step by step.")
+
+     tutor = MathTutor()
+
+     question = st.text_area("Your math question:", height=100)
+
+     if st.button("Get Help"):
+         if question:
+             with st.spinner("Thinking..."):
+                 response = tutor.get_response(question)
+                 explanation = response.split("### Response:")[1]
+                 st.markdown(explanation)
+         else:
+             st.warning("Please enter a question!")
+
+     st.divider()
+     st.markdown("""
+     Example questions:
+     - How do I solve quadratic equations?
+     - Explain the concept of derivatives
+     - Help me understand trigonometry ratios
+     """)

  if __name__ == "__main__":
      main()
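The old generate_response() ran generation under torch.no_grad() and main() stripped the literal <think> tag before display; the new get_response() returns the decoded text as-is, so the rendered answer still carries the model's raw reasoning block. A sketch of post-processing in the spirit of the old version (clean_response is an illustrative helper; the </think> handling assumes the fine-tuned model emits a closing tag, which this commit does not confirm):

def clean_response(decoded: str) -> str:
    # Keep only the text generated after the prompt's "### Response:" marker.
    answer = decoded.split("### Response:")[-1]
    # Assumption: the model closes its reasoning with </think>; if so, show only
    # what follows it. Otherwise fall back to dropping the opening tag, as the
    # previous version did.
    if "</think>" in answer:
        answer = answer.split("</think>", 1)[1]
    return answer.replace("<think>", "").strip()

Wrapping the self.model.generate(...) call in a torch.no_grad() block, as the previous version did, would likewise avoid building an unneeded autograd graph during inference.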
 
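As with the previous version, the app is launched with streamlit run app.py. The model id your-username/deepseek-math-tutor-cpu is a placeholder to be replaced with the actual Hub repository (the removed code carried the same "replace with your model name" note), and sampling up to 1200 new tokens at full precision on CPU can take several minutes per question.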