Pra-tham committed
Commit b1480b1 • 1 Parent(s): 03593d2

fixed bugs

Files changed (3):
  1. README.md +6 -7
  2. app.py +53 -224
  3. requirements.txt +5 -5
README.md CHANGED
@@ -1,13 +1,12 @@
 ---
- title: Deepseekmath
- emoji: 💬
- colorFrom: yellow
- colorTo: purple
 sdk: gradio
- sdk_version: 4.36.1
 app_file: app.py
 pinned: false
- license: apache-2.0
 ---

- An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).

 ---
+ title: Deepseek Math Majority
+ emoji: 🏆
+ colorFrom: pink
+ colorTo: blue
 sdk: gradio
+ sdk_version: 4.44.0
 app_file: app.py
 pinned: false
 ---

+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,227 +1,56 @@
- import gradio as gr
- # from huggingface_hub import InferenceClient
- 
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- # client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
- from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig, set_seed
- # from accelerate import infer_auto_device_map as iadm
- 
- import torch
- from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
- 
- model_name = "deepseek-ai/deepseek-math-7b-instruct"
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto")
- model.generation_config = GenerationConfig.from_pretrained(model_name)
- model.generation_config.pad_token_id = model.generation_config.eos_token_id
- 
- 
- 
- 
- def evaluate_response(problem):
-     # problem=b'what is angle x if angle y is 60 degree and angle z in 60 degree of a traingle'
-     problem=problem+'\nPlease reason step by step, and put your final answer within \\boxed{}.'
-     messages = [
-         {"role": "user", "content": problem}
-     ]
-     input_tensor = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
-     outputs = model.generate(input_tensor.to(model.device), max_new_tokens=100)
- 
-     result = tokenizer.decode(outputs[0][input_tensor.shape[1]:], skip_special_tokens=True)
-     # result_output, code_output = process_output(raw_output)
-     return result
- 
- # def respond(
- #     evaluate_response,
- #     history: list[tuple[str, str]],
- #     system_message,
- #     max_tokens,
- #     temperature,
- #     top_p,
- # ):
- #     messages = [{"role": "system", "content": system_message}]
- 
- #     for val in history:
- #         if val[0]:
- #             messages.append({"role": "user", "content": val[0]})
- #         if val[1]:
- #             messages.append({"role": "assistant", "content": val[1]})
- 
- #     messages.append({"role": "user", "content": message})
- 
- #     response = ""

- #     for message in client.chat_completion(
- #         messages,
- #         max_tokens=max_tokens,
- #         stream=True,
- #         temperature=temperature,
- #         top_p=top_p,
- #     ):
- #         token = message.choices[0].delta.content
- 
- #         response += token
- #         yield response
- 
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- # demo = gr.ChatInterface(
- #     evaluate_response,
- #     additional_inputs=[
- #         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
- #         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
- #         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
- #         gr.Slider(
- #             minimum=0.1,
- #             maximum=1.0,
- #             value=0.95,
- #             step=0.05,
- #             label="Top-p (nucleus sampling)",
- #         ),
- #     ],
- # )
- 
- demo = gr.Interface(
-     fn=evaluate_response,
-     inputs=[gr.Textbox(label="Question")],
-     outputs=gr.Textbox(label="Answer"),
-     title="Question and Answer Interface",
-     description="Enter a question."
- )

- 
 if __name__ == "__main__":
-     demo.launch()
- 
- 
- # import gradio as gr
- # # from huggingface_hub import InferenceClient
- 
- # """
- # For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- # """
- # # client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
- # from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig, set_seed
- # # from accelerate import infer_auto_device_map as iadm
- 
- # import torch
- # from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
- # from transformers import BitsAndBytesConfig
- # from tqdm import tqdm
- # import os
- 
- 
- # USE_PAST_KEY = True
- # import gc
- # torch.backends.cuda.enable_mem_efficient_sdp(False)
- 
- # from transformers import (
- #     AutoModelForCausalLM,
- #     AutoTokenizer,
- #     AutoConfig,
- #     StoppingCriteria,
- #     set_seed
- # )
- 
- # n_repetitions = 1
- # TOTAL_TOKENS = 2048
- 
- # MODEL_PATH = "Pra-tham/quant_deepseekmath"
- # #"/kaggle/input/gemma/transformers/7b-it/1"
- 
- # # DEEP = True
- # import torch
- 
- # from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
- # import transformers
- 
- 
- 
- # tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
- 
- # model = AutoModelForCausalLM.from_pretrained(
- #     MODEL_PATH,
- #     device_map="cpu",
- #     torch_dtype="auto",
- #     trust_remote_code=True,
- 
- # )
- # pipeline = transformers.pipeline(
- #     "text-generation",
- #     model=model,
- #     tokenizer=tokenizer,
- #     torch_dtype='auto',
- #     device_map='cpu',
- # )
- # from transformers import StoppingCriteriaList
- 
- # class StoppingCriteriaSub(StoppingCriteria):
- #     def __init__(self, stops = [], encounters=1):
- #         super().__init__()
- #         # self.stops = [stop.to("cuda") for stop in stops]
- #         self.stops = stops
- 
- #     def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor):
- #         for stop in self.stops:
- #             last_token = input_ids[0][-len(stop):]
- #             if torch.all(torch.eq(stop,last_token)):
- #                 return True
- #         return False
- 
- 
- # stop_words = ["```output", "```python", "```\nOutput" , ")\n```" , "``````output"] #,
- # stop_words_ids = [tokenizer(stop_word, return_tensors='pt', add_special_tokens=False)['input_ids'].squeeze() for stop_word in stop_words]
- # stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)])
- 
- # code = """Below is a math problem you are to solve (positive numerical answer):
- # \"{}\"
- # To accomplish this, first determine a sympy-based approach for solving the problem by listing each step to take and what functions need to be called in each step. Be clear so even an idiot can follow your instructions, and remember, your final answer should be positive integer, not an algebraic expression!
- # Write the entire script covering all the steps (use comments and document it well) and print the result. After solving the problem, output the final numerical answer within \\boxed{}.
- 
- # Approach:"""
- 
- 
- # cot = """Below is a math problem you are to solve (positive numerical answer!):
- # \"{}\"
- # Analyze this problem and think step by step to come to a solution with programs. After solving the problem, output the final numerical answer within \\boxed{}.\n\n"""
- 
- # promplt_options = [code,cot]
- 
- # import re
- # from collections import defaultdict
- # from collections import Counter
- 
- # from numpy.random import choice
- # import numpy as np
- 
- # tool_instruction = '\n\nPlease integrate natural language reasoning with programs to solve the above problem, and put your final numerical answer within \\boxed{}.\nNote that the intermediary calculations may be real numbers, but the final numercal answer would always be an integer.'
- 
- 
- # #tool_instruction = " The answer should be given as a non-negative modulo 1000."
- # #tool_instruction += '\nPlease integrate natural language reasoning with programs to solve the problem above, and put your final answer within \\boxed{}.'
- 
- # demo = gr.Interface(
- #     fn=predict,
- #     inputs=[gr.Textbox(label="Question")],
- #     outputs=gr.Textbox(label="Answer"),
- #     title="Question and Answer Interface",
- #     description="Enter a question."
- # )
- 
- # import subprocess
- 
- # def reboot_system():
- #     try:
- #         # Execute the reboot command
- #         subprocess.run(['sudo', 'reboot'], check=True)
- #     except subprocess.CalledProcessError as e:
- #         print(f"Error occurred while trying to reboot the system: {e}")
- 
- # if __name__ == "__main__":
- #     if os.path.exists("temp.txt"):
- #         os.remove("temp.txt")
- #     reboot_system()
- #     demo.launch()
 
+ import gradio as gr
+ import ctranslate2
+ from transformers import AutoTokenizer
+ from huggingface_hub import snapshot_download
+ 
+ # Define the model and tokenizer loading
+ model_prompt = "Solve the following mathematical problem: "
+ tokenizer = AutoTokenizer.from_pretrained("AI-MO/NuminaMath-7B-TIR")
+ model_path = snapshot_download(repo_id="Makima57/deepseek-math-Numina")
+ generator = ctranslate2.Generator(model_path, device="cpu", compute_type="int8")
+ 
+ # Function to generate predictions using the model
+ def get_prediction(question):
+     input_text = model_prompt + question
+     input_tokens = tokenizer.tokenize(input_text)
+     results = generator.generate_batch([input_tokens])
+     output_tokens = results[0].sequences[0]
+     predicted_answer = tokenizer.convert_tokens_to_string(output_tokens)
+     return predicted_answer
+ 
+ # Function to perform majority voting across multiple predictions
+ def majority_vote(question, num_iterations=10):
+     all_predictions = []
+     for _ in range(num_iterations):
+         prediction = get_prediction(question)
+         all_predictions.append(prediction)
+     majority_voted_pred = max(set(all_predictions), key=all_predictions.count)
+     return majority_voted_pred, all_predictions
+ 
+ # Gradio interface for user input and output
+ def gradio_interface(question, correct_answer):
+     final_prediction, all_predictions = majority_vote(question, num_iterations=10)
+     return {
+         "Question": question,
+         "Generated Answers (10 iterations)": all_predictions,
+         "Majority-Voted Prediction": final_prediction,
+         "Correct Answer": correct_answer
+     }
+ 
+ # Gradio app setup
+ interface = gr.Interface(
+     fn=gradio_interface,
+     inputs=[
+         gr.Textbox(label="Math Question"),
+         gr.Textbox(label="Correct Answer"),
+     ],
+     outputs=[
+         gr.JSON(label="Results"),  # Display the results in a JSON format
+     ],
+     title="Math Question Solver",
+     description="Enter a math question to get the model prediction and see all generated answers.",
+ )
 
 if __name__ == "__main__":
+     interface.launch()
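A minimal sketch of the majority-voting step, not part of the committed app.py: with its default settings, `generate_batch` decodes greedily, so the ten calls in `majority_vote` can all return the same string and the vote adds nothing. The snippet below assumes the `tokenizer`, `generator`, and `model_prompt` objects defined in app.py are in scope, and assumes the installed ctranslate2 release accepts the `max_length`, `sampling_topk`, and `sampling_temperature` arguments; it enables sampling and tallies answers with `collections.Counter`, which is equivalent to the `max(set(...), key=list.count)` expression used in the commit.

```python
# Minimal sketch, not part of app.py as committed.
# Assumes `tokenizer`, `generator`, and `model_prompt` from app.py are in scope,
# and that the installed ctranslate2 supports these generate_batch arguments.
from collections import Counter

def sampled_prediction(question):
    # Enable sampling so that repeated generations can differ;
    # with the defaults, decoding is greedy and every call returns the same text.
    input_tokens = tokenizer.tokenize(model_prompt + question)
    results = generator.generate_batch(
        [input_tokens],
        max_length=512,            # cap the number of generated tokens
        sampling_topk=40,          # sample from the 40 most likely tokens
        sampling_temperature=0.7,  # soften the token distribution
    )
    return tokenizer.convert_tokens_to_string(results[0].sequences[0])

def majority_vote_counter(question, num_iterations=10):
    # Counter.most_common(1) picks the most frequent answer string,
    # mirroring the max(set(...), key=list.count) expression in app.py.
    predictions = [sampled_prediction(question) for _ in range(num_iterations)]
    winner, votes = Counter(predictions).most_common(1)[0]
    return winner, votes, predictions
```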
requirements.txt CHANGED
@@ -1,5 +1,5 @@
- huggingface_hub==0.22.2
- transformers==4.40.0
- torch==2.3.1
- accelerate==0.31.0
- bitsandbytes==0.43.1

+ 
+ gradio
+ ctranslate2
+ transformers
+ torch