# zerogpu-2 / app.py
import gradio as gr
import spaces
import torch
from torch.cuda.amp import autocast
import subprocess
from huggingface_hub import InferenceClient
import os
import psutil
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
from accelerate import init_empty_weights, infer_auto_device_map, load_checkpoint_and_dispatch
# psutil is already imported above, so it must be present in the base image;
# these installs mainly pull in pynvml/gpustat for the GPU stats printed later.
subprocess.run(
    "pip install psutil",
    shell=True,
)
subprocess.run(
    "pip install pynvml gpustat",
    shell=True,
)
def print_system():
    # Log the container's RAM budget so it shows up in the Space logs.
    ram_info = psutil.virtual_memory()
    print(f"Total RAM: {ram_info.total / (1024.0 ** 3)} GB")
    print(f"Available RAM: {ram_info.available / (1024.0 ** 3)} GB")
import psutil
import platform
import gpustat
from datetime import datetime
def get_size(bytes, suffix="B"):
    # Scale a raw byte count into a human-readable string, e.g. get_size(1536) -> "1.50KB".
    factor = 1024
    for unit in ["", "K", "M", "G", "T", "P"]:
        if bytes < factor:
            return f"{bytes:.2f}{unit}{suffix}"
        bytes /= factor
# Install flash-attn; FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE tells its setup to skip
# compiling the CUDA kernels from source during the install.
subprocess.run(
    "pip install flash-attn --no-build-isolation",
    env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
    shell=True,
)
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
# pip install 'git+https://github.com/huggingface/transformers.git'
# Hugging Face access token, read from the Space secret named 'token'.
token = os.getenv('token')
print('token = ', token)
from transformers import AutoModelForCausalLM, AutoTokenizer
# model_id = "mistralai/Mistral-7B-v0.3"
model_id = "CohereForAI/aya-23-8B"
tokenizer = AutoTokenizer.from_pretrained(
    model_id,
    token=token,
)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    token=token,
    # torch_dtype=torch.uint8,
    torch_dtype=torch.float16,
    attn_implementation="flash_attention_2",
    low_cpu_mem_usage=True,
    device_map='cuda',
)
#
# device_map = infer_auto_device_map(model, max_memory={0: "79GB", "cpu":"65GB" })
# Load the model with the inferred device map
# model = load_checkpoint_and_dispatch(model, model_id, device_map=device_map, no_split_module_classes=["GPTJBlock"])
# model.half()
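# The commented lines above hint at accelerate's big-model loading flow, which the
# imports at the top of the file (init_empty_weights, infer_auto_device_map,
# load_checkpoint_and_dispatch) would support. A commented-out, unused sketch of how
# that flow is usually wired together; the max_memory figures are assumptions carried
# over from the comment above:
#
#   from transformers import AutoConfig
#   from huggingface_hub import snapshot_download
#
#   config = AutoConfig.from_pretrained(model_id, token=token)
#   with init_empty_weights():
#       empty_model = AutoModelForCausalLM.from_config(config)
#   checkpoint_dir = snapshot_download(model_id, token=token)
#   device_map = infer_auto_device_map(empty_model, max_memory={0: "79GB", "cpu": "65GB"})
#   model = load_checkpoint_and_dispatch(empty_model, checkpoint_dir, device_map=device_map)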
# spaces.GPU reserves a ZeroGPU slot for up to 60 seconds per call to this function.
@spaces.GPU(duration=60)
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # NOTE: the incoming message/history/sampling arguments are currently ignored;
    # generation below runs on a fixed test prompt.
    print_system()

    uname = platform.uname()
    print(f"System: {uname.system}")
    print(f"Node Name: {uname.node}")
    print(f"Release: {uname.release}")
    print(f"Version: {uname.version}")
    print(f"Machine: {uname.machine}")
    print(f"Processor: {uname.processor}")

    # GPU information
    gpu_stats = gpustat.GPUStatCollection.new_query()
    for gpu in gpu_stats:
        print(f"GPU: {gpu.name} Mem Free: {get_size(gpu.memory_free)} Mem Used: {get_size(gpu.memory_used)} Mem Total: {get_size(gpu.memory_total)}")

    messages = [{"role": "user", "content": "Hello, how are you?"}]
    input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to('cuda')
    ## <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>
    # with autocast():
    gen_tokens = model.generate(
        input_ids,
        max_new_tokens=100,
        # do_sample=True,
        temperature=0.3,
    )
    gen_text = tokenizer.decode(gen_tokens[0])
    print(gen_text)
    yield gen_text
    messages = [
        {"role": "user", "content": "What is your favourite condiment?"},
        {"role": "assistant", "content": "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"},
        {"role": "user", "content": "Do you have mayonnaise recipes?"}
    ]
    # inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to("cuda")
    # outputs = model.generate(inputs, max_new_tokens=2000)
    # gen_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # print(gen_text)
    # yield gen_text

    # for val in history:
    #     if val[0]:
    #         messages.append({"role": "user", "content": val[0]})
    #     if val[1]:
    #         messages.append({"role": "assistant", "content": val[1]})
    # messages.append({"role": "user", "content": message})

    # response = ""
    # for message in client.chat_completion(
    #     messages,
    #     max_tokens=max_tokens,
    #     stream=True,
    #     temperature=temperature,
    #     top_p=top_p,
    # ):
    #     token = message.choices[0].delta.content
    #     response += token
    #     yield response
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)
if __name__ == "__main__":
    demo.launch()