# -*- coding: utf-8 -*-
"""Untitled14.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1qm9JqCY6CGVTqzvw3GEAI8BsI_-w1rwP
"""
# !pip install -q -U gradio
# !pip install -q -U torch transformers accelerate einops
# !pip install -q peft
examples = [
    ["Which organelle carries out the synthesis and packaging of digestive enzymes?"],
    ["What is the change in speed of a moving object per unit time?"],
    ["What is the formula of carbon tetrafluoride?"],
]
import gradio as gr
import torch
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    TextIteratorStreamer,
    pipeline,
)
from peft import PeftModel, PeftConfig
# Load the tokenizer, the phi-2 base model, and the SciQ PEFT adapter
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True)
config = PeftConfig.from_pretrained("mudogruer/phi-2-SciQ")
base_model = AutoModelForCausalLM.from_pretrained("microsoft/phi-2")
model = PeftModel.from_pretrained(base_model, "mudogruer/phi-2-SciQ")
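# Optional (assumption, not in the original app): merging the adapter into the base
# weights can make CPU inference slightly faster, at the cost of the extra memory
# needed during the merge.
# model = model.merge_and_unload()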
# Text generation pipeline
phi2 = pipeline(
    "text-generation",
    tokenizer=tokenizer,
    model=model,
    pad_token_id=tokenizer.eos_token_id,
    eos_token_id=tokenizer.eos_token_id,
    device_map="cpu",
)
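# Note: TextIteratorStreamer is imported above but not used in this app. A streaming
# variant (an assumption, not part of the original Space) would pass a streamer to
# model.generate(), e.g. streamer=TextIteratorStreamer(tokenizer), and yield partial text.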
def generate(message, max_new_tokens):
    # Fall back to a default number of new tokens when the slider value is missing
    if max_new_tokens is None:
        max_new_tokens = 50

    instruction = "You are a helpful assistant to 'User'. You do not respond as 'User' or pretend to be 'User'. You only respond once as 'Assistant'."
    final_prompt = f"Instruction: {instruction}\nUser: {message}\nOutput:"

    # Cap max_new_tokens so prompt plus generation fit within the model's context budget
    input_length = len(tokenizer.encode(final_prompt))
    total_max_length = 512  # Adjust to the model's maximum context length
    if input_length + max_new_tokens > total_max_length:
        max_new_tokens = max(1, total_max_length - input_length)

    # Generate synchronously and return only the text after "Output:"
    response = phi2(final_prompt, max_new_tokens=max_new_tokens)
    generated_text = response[0]['generated_text']
    last_response = generated_text.split('Output:')[-1].strip()
    return last_response
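# Quick command-line sanity check (hypothetical example, not part of the Space UI):
# if __name__ == "__main__":
#     print(generate("What is the formula of carbon tetrafluoride?", 30))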
# Gradio interface setup
with gr.Blocks() as demo:
    gr.Markdown("""### Phi-2 Scientific Question Chatbot (fine-tuned on the SciQ dataset)""")
    tokens_slider = gr.Slider(8, 128, value=21, label="Maximum new tokens")
    chatbot = gr.Interface(fn=generate, inputs=["text", tokens_slider], outputs="text", examples=examples)

demo.launch(share=True)