# Hugging Face Space page residue (status: "Sleeping") — kept as a comment so the file parses.
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import gradio as gr
from pydantic import BaseModel
# ctransformers provides a llama.cpp-style loader for quantized GGUF models.
from ctransformers import AutoModelForCausalLM

# Load the quantized Mistral-family model.
# NOTE(review): the path assumes the .gguf weights file sits in the working
# directory next to this script — confirm on deployment.
llm = AutoModelForCausalLM.from_pretrained(
    "TrillaTag-0.0.3_V1.gguf",
    model_type="mistral",
    max_new_tokens=1096,
    threads=3,
)
# Define a function that will use your model to generate a response | |
def generate_completion(prompt):
    """Generate a text completion for *prompt* with the loaded model.

    Args:
        prompt: The user's input text.

    Returns:
        The generated completion, or the exception message as a string if
        generation fails (kept simple so the demo UI never crashes).
    """
    try:
        # A ctransformers model is invoked directly with a text prompt;
        # its .generate() method expects a sequence of token ids, so the
        # original llm.generate(prompt) call would fail on a string.
        response = llm(prompt)
        return response
    except Exception as e:
        # Surface the error in the UI instead of raising. Consider logging
        # the full traceback here for easier debugging.
        return str(e)
# Build the Gradio UI: a single prompt textbox in, generated text out.
# The gr.inputs.* namespace was deprecated and removed in Gradio 3.x+;
# components are used directly (gr.Textbox).
iface = gr.Interface(
    fn=generate_completion,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs="text",
    title="TrillaTag Model Generator",
    description="Enter a prompt to generate text from the TrillaTag Model.",
)

# share=True publishes a temporary public URL — handy for sharing the demo,
# but it exposes the app to anyone who has the link; use cautiously.
iface.launch(share=True)