Pra-tham committed
Commit 03593d2
1 Parent(s): 9f29f2c

revert back

Files changed (1)
  1. app.py +185 -86
app.py CHANGED
@@ -10,119 +10,218 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig, set_seed
 
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
- from transformers import BitsAndBytesConfig
- from tqdm import tqdm
- import os
-
- USE_PAST_KEY = True
- import gc
- torch.backends.cuda.enable_mem_efficient_sdp(False)
-
- from transformers import (
-     AutoModelForCausalLM,
-     AutoTokenizer,
-     AutoConfig,
-     StoppingCriteria,
-     set_seed
- )
-
- n_repetitions = 1
- TOTAL_TOKENS = 2048
-
- MODEL_PATH = "Pra-tham/quant_deepseekmath"
- # "/kaggle/input/gemma/transformers/7b-it/1"
-
- # DEEP = True
- import torch
-
- from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
- import transformers
-
- tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
-
- model = AutoModelForCausalLM.from_pretrained(
-     MODEL_PATH,
-     device_map="cpu",
-     torch_dtype="auto",
-     trust_remote_code=True,
- )
- pipeline = transformers.pipeline(
-     "text-generation",
-     model=model,
-     tokenizer=tokenizer,
-     torch_dtype='auto',
-     device_map='cpu',
- )
- from transformers import StoppingCriteriaList
-
- class StoppingCriteriaSub(StoppingCriteria):
-     def __init__(self, stops=[], encounters=1):
-         super().__init__()
-         # self.stops = [stop.to("cuda") for stop in stops]
-         self.stops = stops
-
-     def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor):
-         for stop in self.stops:
-             last_token = input_ids[0][-len(stop):]
-             if torch.all(torch.eq(stop, last_token)):
-                 return True
-         return False
-
- stop_words = ["```output", "```python", "```\nOutput", ")\n```", "``````output"]
- stop_words_ids = [tokenizer(stop_word, return_tensors='pt', add_special_tokens=False)['input_ids'].squeeze() for stop_word in stop_words]
- stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)])
-
- code = """Below is a math problem you are to solve (positive numerical answer):
- \"{}\"
- To accomplish this, first determine a sympy-based approach for solving the problem by listing each step to take and what functions need to be called in each step. Be clear so even an idiot can follow your instructions, and remember, your final answer should be a positive integer, not an algebraic expression!
- Write the entire script covering all the steps (use comments and document it well) and print the result. After solving the problem, output the final numerical answer within \\boxed{}.
-
- Approach:"""
-
- cot = """Below is a math problem you are to solve (positive numerical answer!):
- \"{}\"
- Analyze this problem and think step by step to come to a solution with programs. After solving the problem, output the final numerical answer within \\boxed{}.\n\n"""
-
- prompt_options = [code, cot]
-
- import re
- from collections import defaultdict
- from collections import Counter
-
- from numpy.random import choice
- import numpy as np
-
- tool_instruction = '\n\nPlease integrate natural language reasoning with programs to solve the above problem, and put your final numerical answer within \\boxed{}.\nNote that the intermediary calculations may be real numbers, but the final numerical answer would always be an integer.'
-
- # tool_instruction = " The answer should be given as a non-negative modulo 1000."
- # tool_instruction += '\nPlease integrate natural language reasoning with programs to solve the problem above, and put your final answer within \\boxed{}.'
-
- demo = gr.Interface(
-     fn=predict,
-     inputs=[gr.Textbox(label="Question")],
-     outputs=gr.Textbox(label="Answer"),
-     title="Question and Answer Interface",
-     description="Enter a question."
- )
-
- import subprocess
-
- def reboot_system():
-     try:
-         # Execute the reboot command
-         subprocess.run(['sudo', 'reboot'], check=True)
-     except subprocess.CalledProcessError as e:
-         print(f"Error occurred while trying to reboot the system: {e}")
-
- if __name__ == "__main__":
-     if os.path.exists("temp.txt"):
-         os.remove("temp.txt")
-     reboot_system()
-     demo.launch()
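The removed block builds `stopping_criteria` from tool-use stop words but its call site falls outside this hunk. A minimal sketch of how such stop-word criteria would typically feed into generation, assuming the `model`, `tokenizer`, `TOTAL_TOKENS`, and `stopping_criteria` objects defined above (the prompt string is illustrative, not from the commit):

```python
# Sketch: stop-word-gated generation, as the removed code presumably used it.
prompt = 'Solve: what is 17 * 23? Put the final answer within \\boxed{}.'
inputs = tokenizer(prompt, return_tensors="pt")

# Generation halts as soon as the sampled tail matches one of the stop
# words (e.g. "```output"), so a caller can execute the emitted Python
# block and feed its output back into the conversation.
outputs = model.generate(
    **inputs,
    max_new_tokens=TOTAL_TOKENS,
    stopping_criteria=stopping_criteria,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```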
+
+ model_name = "deepseek-ai/deepseek-math-7b-instruct"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto")
+ model.generation_config = GenerationConfig.from_pretrained(model_name)
+ model.generation_config.pad_token_id = model.generation_config.eos_token_id
+
+
+ def evaluate_response(problem):
+     # problem = 'what is angle x if angle y is 60 degrees and angle z is 60 degrees in a triangle'
+     problem = problem + '\nPlease reason step by step, and put your final answer within \\boxed{}.'
+     messages = [
+         {"role": "user", "content": problem}
+     ]
+     input_tensor = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
+     outputs = model.generate(input_tensor.to(model.device), max_new_tokens=100)
+
+     result = tokenizer.decode(outputs[0][input_tensor.shape[1]:], skip_special_tokens=True)
+     # result_output, code_output = process_output(raw_output)
+     return result
+
+ # def respond(
+ #     message,
+ #     history: list[tuple[str, str]],
+ #     system_message,
+ #     max_tokens,
+ #     temperature,
+ #     top_p,
+ # ):
+ #     messages = [{"role": "system", "content": system_message}]
+
+ #     for val in history:
+ #         if val[0]:
+ #             messages.append({"role": "user", "content": val[0]})
+ #         if val[1]:
+ #             messages.append({"role": "assistant", "content": val[1]})
+
+ #     messages.append({"role": "user", "content": message})
+
+ #     response = ""
+
+ #     for message in client.chat_completion(
+ #         messages,
+ #         max_tokens=max_tokens,
+ #         stream=True,
+ #         temperature=temperature,
+ #         top_p=top_p,
+ #     ):
+ #         token = message.choices[0].delta.content
+
+ #         response += token
+ #         yield response
+
+ """
+ For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
+ """
+ # demo = gr.ChatInterface(
+ #     evaluate_response,
+ #     additional_inputs=[
+ #         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+ #         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+ #         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+ #         gr.Slider(
+ #             minimum=0.1,
+ #             maximum=1.0,
+ #             value=0.95,
+ #             step=0.05,
+ #             label="Top-p (nucleus sampling)",
+ #         ),
+ #     ],
+ # )
+
+ demo = gr.Interface(
+     fn=evaluate_response,
+     inputs=[gr.Textbox(label="Question")],
+     outputs=gr.Textbox(label="Answer"),
+     title="Question and Answer Interface",
+     description="Enter a question."
+ )
+
+
+ if __name__ == "__main__":
+     demo.launch()
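A quick way to exercise the reverted path outside Gradio (a sketch; the `\boxed{}` extraction regex is an assumption, not part of this commit). Note that `max_new_tokens=100` can truncate a multi-step solution before the boxed answer appears:

```python
import re

# Smoke test for the reverted generation path.
reply = evaluate_response("What is 12 * 12?")
print(reply)

# Hypothetical helper: pull the final \boxed{...} value out of the reply.
match = re.search(r"\\boxed\{([^{}]*)\}", reply)
if match:
    print("Final answer:", match.group(1))
```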
+
+
+ # import gradio as gr
+ # # from huggingface_hub import InferenceClient
+
+ # """
+ # For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+ # """
+ # # client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+ # from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig, set_seed
+ # # from accelerate import infer_auto_device_map as iadm
+
+ # import torch
+ # from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
+ # from transformers import BitsAndBytesConfig
+ # from tqdm import tqdm
+ # import os
+
+ # USE_PAST_KEY = True
+ # import gc
+ # torch.backends.cuda.enable_mem_efficient_sdp(False)
+
+ # from transformers import (
+ #     AutoModelForCausalLM,
+ #     AutoTokenizer,
+ #     AutoConfig,
+ #     StoppingCriteria,
+ #     set_seed
+ # )
+
+ # n_repetitions = 1
+ # TOTAL_TOKENS = 2048
+
+ # MODEL_PATH = "Pra-tham/quant_deepseekmath"
+ # # "/kaggle/input/gemma/transformers/7b-it/1"
+
+ # # DEEP = True
+ # import torch
+
+ # from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
+ # import transformers
+
+ # tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
+
+ # model = AutoModelForCausalLM.from_pretrained(
+ #     MODEL_PATH,
+ #     device_map="cpu",
+ #     torch_dtype="auto",
+ #     trust_remote_code=True,
+ # )
+ # pipeline = transformers.pipeline(
+ #     "text-generation",
+ #     model=model,
+ #     tokenizer=tokenizer,
+ #     torch_dtype='auto',
+ #     device_map='cpu',
+ # )
+ # from transformers import StoppingCriteriaList
+
+ # class StoppingCriteriaSub(StoppingCriteria):
+ #     def __init__(self, stops=[], encounters=1):
+ #         super().__init__()
+ #         # self.stops = [stop.to("cuda") for stop in stops]
+ #         self.stops = stops
+
+ #     def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor):
+ #         for stop in self.stops:
+ #             last_token = input_ids[0][-len(stop):]
+ #             if torch.all(torch.eq(stop, last_token)):
+ #                 return True
+ #         return False
+
+ # stop_words = ["```output", "```python", "```\nOutput", ")\n```", "``````output"]
+ # stop_words_ids = [tokenizer(stop_word, return_tensors='pt', add_special_tokens=False)['input_ids'].squeeze() for stop_word in stop_words]
+ # stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)])
+
+ # code = """Below is a math problem you are to solve (positive numerical answer):
+ # \"{}\"
+ # To accomplish this, first determine a sympy-based approach for solving the problem by listing each step to take and what functions need to be called in each step. Be clear so even an idiot can follow your instructions, and remember, your final answer should be a positive integer, not an algebraic expression!
+ # Write the entire script covering all the steps (use comments and document it well) and print the result. After solving the problem, output the final numerical answer within \\boxed{}.
+
+ # Approach:"""
+
+ # cot = """Below is a math problem you are to solve (positive numerical answer!):
+ # \"{}\"
+ # Analyze this problem and think step by step to come to a solution with programs. After solving the problem, output the final numerical answer within \\boxed{}.\n\n"""
+
+ # prompt_options = [code, cot]
+
+ # import re
+ # from collections import defaultdict
+ # from collections import Counter
+
+ # from numpy.random import choice
+ # import numpy as np
+
+ # tool_instruction = '\n\nPlease integrate natural language reasoning with programs to solve the above problem, and put your final numerical answer within \\boxed{}.\nNote that the intermediary calculations may be real numbers, but the final numerical answer would always be an integer.'
+
+ # # tool_instruction = " The answer should be given as a non-negative modulo 1000."
+ # # tool_instruction += '\nPlease integrate natural language reasoning with programs to solve the problem above, and put your final answer within \\boxed{}.'
+
+ # demo = gr.Interface(
+ #     fn=predict,
+ #     inputs=[gr.Textbox(label="Question")],
+ #     outputs=gr.Textbox(label="Answer"),
+ #     title="Question and Answer Interface",
+ #     description="Enter a question."
+ # )
+
+ # import subprocess
+
+ # def reboot_system():
+ #     try:
+ #         # Execute the reboot command
+ #         subprocess.run(['sudo', 'reboot'], check=True)
+ #     except subprocess.CalledProcessError as e:
+ #         print(f"Error occurred while trying to reboot the system: {e}")
+
+ # if __name__ == "__main__":
+ #     if os.path.exists("temp.txt"):
+ #         os.remove("temp.txt")
+ #     reboot_system()
+ #     demo.launch()
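The reverted version leans on the tokenizer's chat template rather than the hand-built prompt strings kept in the commented-out block above. A small sketch for inspecting what `apply_chat_template` actually renders (illustrative; the exact special tokens depend on the deepseek-math template):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/deepseek-math-7b-instruct")
messages = [{"role": "user", "content": "What is 2 + 2?"}]

# tokenize=False returns the rendered prompt string instead of token ids,
# which makes the template easy to inspect before generating.
prompt_text = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=False
)
print(prompt_text)
```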