import spaces
import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the GPT-2 model fine-tuned for implicit-CoT multiplication, plus its tokenizer.
model_name = 'yuntian-deng/gpt2-implicit-cot-multiplication'
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
def preprocess(num):
    # The model expects each operand least-significant digit first, with the
    # digits separated by spaces, e.g. 123 -> '3 2 1'.
    num = str(num).strip().replace(' ', '')
    reversed_num = ' '.join(num[::-1])
    return reversed_num
def postprocess(raw_output):
    # Undo the preprocessing: strip the spaces and reverse the digits back
    # into conventional most-significant-first order.
    prediction = raw_output.replace(' ', '')[::-1]
    return prediction
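# Quick round-trip sanity check of the two helpers; the values follow directly
# from the string operations above:
#   preprocess(123)        -> '3 2 1'
#   postprocess('0 7 4 1') -> '1470'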
def build_diff(prediction, correct_product):
    # Align the two digit strings position by position for HighlightedText:
    # matching digits get no highlight, incorrect predicted digits are tagged
    # '+' (red), and the digits they should have been are tagged '-' (green).
    diff = []
    for i in range(max(len(prediction), len(correct_product))):
        if i < len(prediction) and i < len(correct_product) and prediction[i] == correct_product[i]:
            diff.append((prediction[i], None))
        elif i < len(prediction) and (i >= len(correct_product) or prediction[i] != correct_product[i]):
            diff.append((prediction[i], '+'))
        if i < len(correct_product) and (i >= len(prediction) or prediction[i] != correct_product[i]):
            diff.append((correct_product[i], '-'))
    return diff

@spaces.GPU  # Required on ZeroGPU Spaces: requests a GPU for the duration of the call.
def predict_product(num1, num2):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    input_text = f'{preprocess(num1)} * {preprocess(num2)} ='
    inputs = tokenizer(input_text, return_tensors='pt').to(device)
    generated_ids = inputs['input_ids']
    prediction = ""
    correct_product = ""
    valid_input = True
    # Compute the ground-truth product for grading; flag non-integer input.
    try:
        num1_int = int(num1)
        num2_int = int(num2)
        correct_product = str(num1_int * num2_int)
    except ValueError:
        valid_input = False
    eos_token_id = tokenizer.eos_token_id
    past_key_values = None

    # Generate one token at a time so partial results can be streamed to the
    # UI, reusing the KV cache so the prefix is not re-encoded at every step.
    for _ in range(100):  # hard cap to prevent infinite loops
        outputs = model.generate(generated_ids, max_new_tokens=1, do_sample=False, past_key_values=past_key_values, return_dict_in_generate=True, use_cache=True)
        generated_ids = torch.cat((generated_ids, outputs.sequences[:, -1:]), dim=-1)
        past_key_values = outputs.past_key_values
        if outputs.sequences[0, -1].item() == eos_token_id:
            break
        output_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        prediction = postprocess(output_text[len(input_text):])
        # Stream the running comparison to the HighlightedText component.
        diff = build_diff(prediction, correct_product)
        yield diff, ""
    if valid_input:
        is_correct = prediction == correct_product
        result_message = "Correct!" if is_correct else f"Incorrect! The correct product is {correct_product}."
    else:
        result_message = "Invalid input. Could not evaluate correctness."
    # Emit the completed comparison together with the verdict.
    final_diff = build_diff(prediction, correct_product)
    yield final_diff, result_message
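# Because predict_product is a generator (it yields after every token), Gradio
# streams each partial diff to the interface instead of waiting for the full
# product before rendering anything.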
demo = gr.Interface(
    fn=predict_product,
    inputs=[
        gr.Textbox(label='First Number (up to 12 digits)', value='12345'),
        gr.Textbox(label='Second Number (up to 12 digits)', value='67890'),
    ],
    outputs=[
        gr.HighlightedText(
            label='Predicted Product (matching and mismatched digits highlighted)',
            combine_adjacent=True,
            show_legend=True,
            color_map={"-": "green", "+": "red"},
        ),
        gr.HTML(label='Result Message'),
    ],
    title='GPT-2 Direct Multiplication Calculator (Without Using Chain-of-Thought)',
    description='This demo uses GPT-2 to directly predict the product of two numbers without emitting any intermediate reasoning steps. The model has been fine-tuned to internalize chain-of-thought reasoning within its hidden states, following our stepwise internalization approach detailed in the paper linked at the bottom of this page.',
    article="""
- [Paper: From Explicit CoT to Implicit CoT: Learning to Internalize CoT Step by Step](https://arxiv.org/pdf/2405.14838)
- [Code Repository](https://github.com/da03/Internalize_CoT_Step_by_Step)
- [Tweet Announcement](https://twitter.com/yuntiandeng/status/1795854740879774036)
""",
    clear_btn=None,
    submit_btn="Multiply!",
    live=False,
)

demo.launch()
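# A minimal sketch of exercising the predictor without the UI, assuming an
# environment where the @spaces.GPU decorator is a no-op (e.g. running
# locally rather than on ZeroGPU hardware):
#
#   for diff, message in predict_product('12345', '67890'):
#       pass  # each diff is a partial comparison; the last one is final
#   print(message)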