Spaces:
Sleeping
Sleeping
Upload 3 files
Browse files

- .gitattributes +1 -0
- TrillaTag-0.0.3_V1.gguf +3 -0
- app.py +42 -0
- requirements.txt +16 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
TrillaTag-0.0.3_V1.gguf filter=lfs diff=lfs merge=lfs -text
|
TrillaTag-0.0.3_V1.gguf
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:edfe79cdc4ecdce7338279a930fdc6ec3a5ce36c0aed475a9423570fb4b1e5e4
|
3 |
+
size 7161089728
|
app.py
ADDED
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#!/usr/bin/env python
# coding: utf-8
"""Hugging Face Space: serve text completions from the TrillaTag GGUF model.

Loads the quantized model with ctransformers and exposes a single-textbox
Gradio interface that returns the model's completion for a user prompt.
"""

# In[ ]:


import gradio as gr
from pydantic import BaseModel  # NOTE(review): unused here; kept to preserve the file's imports

# ctransformers loads GGUF-quantized models with a transformers-like API
from ctransformers import AutoModelForCausalLM

# Load the quantized model from the GGUF file shipped alongside this script.
# model_type must match the architecture the GGUF was quantized from.
llm = AutoModelForCausalLM.from_pretrained(
    "TrillaTag-0.0.3_V1.gguf",
    model_type='mistral',
    max_new_tokens=1096,
    threads=3,
)


def generate_completion(prompt):
    """Return the model's text completion for *prompt*.

    On any failure the exception text is returned as a plain string so the
    Gradio UI shows the error instead of crashing the Space (demo-friendly,
    deliberate best-effort handling).
    """
    try:
        # BUG FIX: ctransformers' string-in/string-out API is the model's
        # __call__; .generate() expects a sequence of token ids, not a str,
        # so the original llm.generate(prompt) raised at runtime and the
        # broad except handed the error message back to the user as output.
        return llm(prompt)
    except Exception as e:
        # For simplicity, surface the error as the response string.
        return str(e)


# Gradio interface: one prompt textbox in, generated text out.
# BUG FIX: gr.inputs.* was deprecated in Gradio 3.x and removed in 4.x;
# the component class is now gr.Textbox.
iface = gr.Interface(
    fn=generate_completion,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs="text",
    title="TrillaTag Model Generator",
    description="Enter a prompt to generate text from the TrillaTag Model.",
)

# Launch the Gradio app.
# share=True generates a public link when run locally; on Hugging Face Spaces
# it is ignored (the Space itself is the public endpoint). Use cautiously for
# public-facing applications.
iface.launch(share=True)
requirements.txt
ADDED
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
python-multipart
|
2 |
+
fastapi
|
3 |
+
pydantic
|
4 |
+
uvicorn
|
5 |
+
requests
|
6 |
+
python-dotenv
|
7 |
+
ctransformers
|
8 |
+
absl-py
|
9 |
+
accelerate
|
10 |
+
aiohttp
|
11 |
+
aiosignal
|
12 |
+
alabaster
|
13 |
+
anaconda-client
|
14 |
+
anaconda-navigator
|
15 |
+
anaconda-project
|
16 |
+
gradio
|