import json

import requests
import gradio as gr

public_ip = '71.202.66.108'

model = 'llama3.1:latest'  # Replace the model name if needed
context = []  # Holds the Ollama conversation context between turns
ollama_serve = f"http://{public_ip}:11434/api/generate"
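
# Ollama's /api/generate endpoint streams newline-delimited JSON. Each line
# looks roughly like:
#   {"model": "llama3.1:latest", "response": "Hello", "done": false}
# and the final line carries "done": true plus a "context" array encoding the
# conversation state to pass back on the next request.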
# Call the Ollama API and stream the growing response
def generate(prompt, ctx, top_k, top_p, temp):
    global context
    r = requests.post(ollama_serve,
                      json={
                          'model': model,
                          'prompt': prompt,
                          'context': ctx,
                          'options': {
                              'top_k': top_k,
                              'top_p': top_p,
                              'temperature': temp
                          }
                      },
                      stream=True)
    r.raise_for_status()

    response = ""
    for line in r.iter_lines():
        if not line:  # Skip keep-alive blank lines
            continue
        body = json.loads(line)
        if 'error' in body:
            yield f"Error: {body['error']}"
            return
        # Append the token to the growing response and yield the entire response so far
        response_part = body.get('response', '')
        if response_part:
            response += response_part
            yield response  # Yield the growing response incrementally
        if body.get('done', False):
            # Persist the returned context so the next turn continues the conversation
            context = body.get('context', [])
            return  # End the generator once done
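
# Quick smoke test of generate() outside Gradio (commented out; assumes the
# Ollama server configured above is reachable):
#
#   for partial in generate("Why is the sky blue?", [], 40, 0.9, 0.8):
#       print(partial)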
def chat(user_input, chat_history, top_k, top_p, temp):
    chat_history = chat_history or []
    # Add the user turn first, with an empty assistant reply to fill in
    chat_history.append((user_input, ""))

    # Stream each partial response as it is received
    for response in generate(user_input, context, top_k, top_p, temp):
        # Update the assistant half of the latest (user, assistant) pair
        chat_history[-1] = (user_input, response)
        yield chat_history, chat_history  # Yield the updated chat history
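
# Gradio's Chatbot (tuple format) renders chat_history as a list of
# (user, assistant) pairs, so each yield redraws the conversation so far.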
######################### Gradio Code ##########################
block = gr.Blocks(css="""
.chatbox {
    background-image: url('https://cdn.shoplightspeed.com/shops/631940/files/45845092/800x800x3/apple-apple-macpro-trashcan-12-core-27ghz-64gb-1tb.jpg');
    background-size: contain;      /* Ensure the image fits the height */
    background-repeat: no-repeat;
    background-position: center;
    height: 100%;                  /* Make the chatbox fill the available height */
}
""")
with block:
    gr.Markdown("""<h1><center> Trashcan AI </center></h1>""")
    gr.Markdown("""<h3><center> Llama 3.1 hosted on a 2013 "Trashcan" Mac Pro with Ollama </center></h3>""")

    chatbot = gr.Chatbot(elem_classes="chatbox")
    message = gr.Textbox(placeholder="Type here")
    state = gr.State()

    with gr.Row():
        top_k = gr.Slider(0, 100, step=1, label="top_k", value=40, info="Reduces the probability of generating nonsense. A higher value (e.g. 100) gives more diverse answers; a lower value (e.g. 10) is more conservative. (Default: 40)")
        top_p = gr.Slider(0.0, 1.0, label="top_p", value=0.9, info="Works together with top_k. A higher value (e.g. 0.95) leads to more diverse text; a lower value (e.g. 0.5) generates more focused and conservative text. (Default: 0.9)")
        temp = gr.Slider(0.0, 2.0, label="temperature", value=0.8, info="The temperature of the model. Increasing the temperature makes the model answer more creatively. (Default: 0.8)")

    submit = gr.Button("SEND")

    # Use .click() to trigger the response streaming
    submit.click(chat, inputs=[message, state, top_k, top_p, temp], outputs=[chatbot, state])
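
    # Because chat is a generator, Gradio re-renders the outputs on every
    # yield, which produces the token-by-token streaming effect in the UI.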

if __name__ == "__main__":
    block.launch()