Update app.py
app.py CHANGED
@@ -1,31 +1,27 @@
-
-from texify.inference import batch_inference
-from texify.model.model import load_model
-from texify.model.processor import load_processor
-from PIL import Image
-
-title="""# 🙋🏻♂️Welcome to🌟Tonic's👨🏻🔬Texify"""
-description="""You can upload a picture with a math formula and this model will return latex formulas. Texify is a multimodal input model. You can use this Space to test out the current model [vikp/texify2](https://huggingface.co/vikp/texify2) You can also use vikp/texify2🚀 by cloning this space. Simply click here: [Duplicate Space](https://huggingface.co/spaces/Tonic1/texify?duplicate=true)
-Join us: TeamTonic is always making cool demos! Join our active builder's community on Discord: [Discord](https://discord.gg/nXx5wbX9) On Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On Github: [Polytonic](https://github.com/tonic-ai) & contribute to [PolyGPT](https://github.com/tonic-ai/polygpt-alpha) You can also join the [texify community here](https://discord.gg/zJSDQJWDe8). Big thanks to Vik Paruchuri for the invite and Huggingface for the Community Grant. Your special attentions are much appreciated.
-"""
+from transformers import pipeline
 
+pipeline = pipeline(
+    "text-generation",
+    model="Nexusflow/NexusRaven-V2-13B",
+    torch_dtype="auto",
+    device_map="auto",
+)
 
-
-
+title="""# 🙋🏻♂️Welcome to🌟Tonic's🐦⬛NexusRaven"""
+description="""this model is used to select and return function calling arguments.
+"""
 
-
-# img = Image.fromarray(img)
-
-results = batch_inference([img], model, processor)
+prompt = prompt_template.format(query="What's the weather like in Seattle right now?")
 
-
+result = pipeline(prompt, max_new_tokens=2048, return_full_text=False, do_sample=False, temperature=0.001)[0]["generated_text"]
+print (result)
 
 with gr.Blocks() as app:
     gr.Markdown(title)
     gr.Markdown(description)
     with gr.Row():
         with gr.Column():
-
+            input = gr.Textbox()
         with gr.Column():
             output = gr.Textbox()
     image_input.change(process_image, inputs=image_input, outputs=output)
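
As committed, the new app.py still references names left over from the Texify demo (image_input, process_image) and an undefined prompt_template, so it will not run as-is. Below is a minimal sketch of what a runnable version could look like. The gradio import, the placeholder FUNCTION_CALLING_PROMPT (not NexusRaven-V2's official template, which also carries the candidate function definitions), the generate() callback, the renamed raven_pipeline variable, the Textbox wiring, and the final app.launch() are all assumptions added for illustration, not part of the commit.

import gradio as gr
from transformers import pipeline

# Renamed so the call inside generate() does not shadow the transformers.pipeline factory.
raven_pipeline = pipeline(
    "text-generation",
    model="Nexusflow/NexusRaven-V2-13B",
    torch_dtype="auto",
    device_map="auto",
)

title = """# 🙋🏻♂️Welcome to🌟Tonic's🐦⬛NexusRaven"""
description = """this model is used to select and return function calling arguments.
"""

# Placeholder prompt (an assumption); a real deployment would use NexusRaven-V2's
# documented prompt format, which lists the available function signatures before the query.
FUNCTION_CALLING_PROMPT = "User Query: {query}<human_end>"

def generate(query):
    # Greedy decoding, mirroring the generation settings in the committed code.
    prompt = FUNCTION_CALLING_PROMPT.format(query=query)
    return raven_pipeline(
        prompt,
        max_new_tokens=2048,
        return_full_text=False,
        do_sample=False,
        temperature=0.001,
    )[0]["generated_text"]

with gr.Blocks() as app:
    gr.Markdown(title)
    gr.Markdown(description)
    with gr.Row():
        with gr.Column():
            query_input = gr.Textbox(label="Query")
        with gr.Column():
            output = gr.Textbox(label="Generated function call")
    query_input.change(generate, inputs=query_input, outputs=output)

app.launch()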