File size: 2,058 Bytes
115fce4
 
 
291e22e
 
115fce4
291e22e
 
f5c3a62
291e22e
115fce4
 
 
 
 
 
 
 
 
 
 
ffdb4c9
 
 
 
 
 
 
 
 
115fce4
974ca49
ffdb4c9
115fce4
ffdb4c9
 
 
974ca49
115fce4
 
 
 
 
 
 
 
 
 
ffdb4c9
115fce4
 
ffdb4c9
115fce4
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57

from collections import Counter

import ctranslate2
import gradio as gr
from huggingface_hub import snapshot_download
from transformers import AutoTokenizer

# Define the model and tokenizer loading
# Module-level setup: runs once at import time. Downloads model weights and
# loads them onto CPU, so importing this module is slow and needs network access.
# Prompt prefix prepended to every user question before tokenization.
model_prompt = "Solve the following mathematical problem: "
# Tokenizer comes from the NuminaMath repo; weights come from a separate
# CTranslate2-converted repo (the two must stay vocabulary-compatible).
tokenizer = AutoTokenizer.from_pretrained("AI-MO/NuminaMath-7B-TIR")
model_path = snapshot_download(repo_id="Makima57/deepseek-math-Numina")
# int8 quantization keeps the 7B model small enough for CPU inference.
generator = ctranslate2.Generator(model_path, device="cpu", compute_type="int8")

# Function to generate predictions using the model
def get_prediction(question):
    """Generate one model answer for *question*.

    The question is prefixed with the module-level ``model_prompt``,
    tokenized, run through the CTranslate2 ``generator``, and the first
    generated sequence is detokenized back into a plain string.
    """
    prompt = model_prompt + question
    tokens = tokenizer.tokenize(prompt)
    batch = generator.generate_batch([tokens])
    # Single-item batch: take the top sequence of the first (only) result.
    best_sequence = batch[0].sequences[0]
    return tokenizer.convert_tokens_to_string(best_sequence)

# Function to perform majority voting across multiple predictions
def majority_vote(question, num_iterations=10, predictor=None):
    """Sample several predictions for *question* and majority-vote them.

    Parameters
    ----------
    question : str
        The math question forwarded to the model.
    num_iterations : int, optional
        Number of independent generations to sample (default 10).
    predictor : callable, optional
        Function mapping a question string to a prediction string.
        Defaults to :func:`get_prediction`; injectable for testing.

    Returns
    -------
    tuple
        ``(majority_voted_prediction, all_predictions)`` where the second
        element is the list of every sampled prediction, in order.
    """
    if predictor is None:
        predictor = get_prediction
    all_predictions = [predictor(question) for _ in range(num_iterations)]
    # Counter.most_common breaks ties deterministically (first-seen wins),
    # unlike the previous max(set(...), key=list.count) whose tie order
    # depended on arbitrary set iteration and rescanned the list per element.
    majority_voted_pred = Counter(all_predictions).most_common(1)[0][0]
    return majority_voted_pred, all_predictions

# Gradio interface for user input and output
def gradio_interface(question, correct_answer):
    """Run majority voting on *question* and bundle the results for display.

    Pairs the original question and the user-supplied correct answer with
    every generated prediction and the majority-voted winner, in the dict
    shape the Gradio JSON output component renders.
    """
    voted, attempts = majority_vote(question, num_iterations=10)
    results = {}
    results["Question"] = question
    results["Generated Answers (10 iterations)"] = attempts
    results["Majority-Voted Prediction"] = voted
    results["Correct Answer"] = correct_answer
    return results

# Gradio app setup
# Two free-text inputs (the question and a reference answer for manual
# comparison); one JSON output showing all sampled generations plus the vote.
interface = gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Textbox(label="Math Question"),
        gr.Textbox(label="Correct Answer"),
    ],
    outputs=[
        gr.JSON(label="Results"),  # Display the results in a JSON format
    ],
    title="Math Question Solver",
    description="Enter a math question to get the model prediction and see all generated answers.",
)

# Launch the web UI only when run as a script, not on import.
if __name__ == "__main__":
    interface.launch()