import gradio as gr
import os
import torch
from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
from threading import Thread
from accelerate import init_empty_weights, infer_auto_device_map, disk_offload
# Read the Hugging Face access token from the environment (the Llama 3 checkpoint is gated)
HF_TOKEN = os.getenv("HF_TOKEN")
DESCRIPTION = '''
<div>
<h1 style="text-align: center;">ContenteaseAI custom-trained model</h1>
</div>
'''
LICENSE = """
<p/>
---
For more information, visit our [website](https://contentease.ai).
"""
PLACEHOLDER = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
<h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">ContenteaseAI Custom AI trained model</h1>
<p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Enter the text extracted from the PDF:</p>
</div>
"""
css = """
h1 {
text-align: center;
display: block;
}
"""
def initialize_model(model_name, max_memory=None):
    device = torch.device("cpu")

    # Load the model configuration (the checkpoint is gated, so pass the token)
    config = AutoConfig.from_pretrained(model_name, token=HF_TOKEN)

    with init_empty_weights():
        # Initialize the model skeleton without allocating real weights
        model = AutoModelForCausalLM.from_config(config)

    # Create a device map based on the memory constraints; Llama decoder layers
    # must stay whole, so they are listed as non-splittable
    device_map = infer_auto_device_map(
        model,
        max_memory=max_memory,
        no_split_module_classes=["LlamaDecoderLayer"],
        dtype="float16",
    )

    # Determine whether any weights have to be offloaded to disk
    needs_offloading = any(placement == "disk" for placement in device_map.values())

    if needs_offloading:
        # Load the model with disk offloading enabled
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            device_map=device_map,
            offload_folder="offload",
            offload_state_dict=True,
            torch_dtype=torch.float16,
            token=HF_TOKEN,
        )
        # Offload the model weights to disk
        disk_offload(model=model, offload_dir="offload/")
    else:
        # Load the model normally onto the target device
        model = AutoModelForCausalLM.from_pretrained(
            model_name, torch_dtype=torch.float16, token=HF_TOKEN
        )
        model.to(device)

    return model
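# A minimal usage sketch for initialize_model. The budget below is an assumed
# example, not a value from this Space: max_memory maps each device ("cpu", or
# a GPU index such as 0) to a size string understood by infer_auto_device_map;
# whatever does not fit is mapped to "disk" and offloaded under offload/.
#
#   model = initialize_model(
#       "meta-llama/Meta-Llama-3-8B-Instruct",
#       max_memory={"cpu": "15GiB"},  # assumed budget; tune to the host machine
#   )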
try:
    # Initialize the model and tokenizer. NOTE: the original max_memory value
    # was missing a number ("GiB"); the "15GiB" CPU budget below is an assumed
    # placeholder, not a value from the original file.
    model_name = "meta-llama/Meta-Llama-3-8B-Instruct"
    model = initialize_model(model_name, max_memory={"cpu": "15GiB"})
    tokenizer = AutoTokenizer.from_pretrained(model_name, token=HF_TOKEN)
except Exception as e:
    print(f"Error initializing model: {e}")
    exit(1)

# Llama 3 emits <|eot_id|> at the end of an assistant turn, so treat it as a terminator
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>"),
]
def chat_llama3_8b(message: str, history: list, temperature: float, max_new_tokens: int):
    """
    Generate a streaming response using the Llama-3-8B model.

    Args:
        message (str): The input message.
        history (list): The conversation history used by ChatInterface.
        temperature (float): The sampling temperature for generation.
        max_new_tokens (int): The maximum number of new tokens to generate.

    Yields:
        str: The response generated so far.
    """
    conversation = []
    message += " Extract all relevant keywords and quantities from the following text and format the result as nested JSON:"
    for user, assistant in history:
        conversation.extend(
            [{"role": "user", "content": user}, {"role": "assistant", "content": assistant}]
        )
    conversation.append({"role": "user", "content": message})

    # Tokenize the conversation and append the assistant header so the model
    # starts a new turn instead of continuing the user's message
    input_ids = tokenizer.apply_chat_template(
        conversation, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)

    streamer = TextIteratorStreamer(
        tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
    )

    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        eos_token_id=terminators,
    )
    # Fall back to greedy decoding when temperature is 0; sampling with
    # temperature 0 is undefined
    if temperature == 0:
        generate_kwargs["do_sample"] = False

    # Run generation in a background thread so tokens can be streamed as they arrive
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
# Gradio UI
chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label="Gradio ChatInterface")

with gr.Blocks(fill_height=True, css=css) as demo:
    gr.Markdown(DESCRIPTION)
    gr.ChatInterface(
        fn=chat_llama3_8b,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
        additional_inputs=[
            gr.Slider(
                minimum=0,
                maximum=1,
                step=0.1,
                value=0.95,
                label="Temperature",
                render=False,
            ),
            gr.Slider(
                minimum=128,
                maximum=8192,  # Llama 3's context window; the original 9012 looks like a typo
                step=1,
                value=512,
                label="Max new tokens",
                render=False,
            ),
        ],
    )
    gr.Markdown(LICENSE)

if __name__ == "__main__":
    demo.launch(server_port=8000, share=True)
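# Note: share=True asks Gradio to open a temporary public tunnel. On Hugging
# Face Spaces this is unnecessary (the Space already serves a public URL) and
# the port is managed by the platform, so a plain demo.launch() also works there.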