Runtime error
Update app.py
app.py
CHANGED
@@ -1,10 +1,68 @@
 import gradio as gr
 from transformers import AutoTokenizer
+import gemma
 from gemma.modeling_gemma import GemmaForCausalLM
 import torch
 import time
 
-
+title = "Tonic's 🐙🐙Octopus"
+description = "Octopus-V2-2B, an advanced open-source language model with 2 billion parameters, represents Nexa AI's research breakthrough in the application of large language models (LLMs) for function calling, specifically tailored for Android APIs. Unlike Retrieval-Augmented Generation (RAG) methods, which require detailed descriptions of potential function arguments—sometimes needing up to tens of thousands of input tokens—Octopus-V2-2B introduces a unique functional token strategy for both its training and inference stages. This approach not only allows it to achieve performance levels comparable to GPT-4 but also significantly enhances its inference speed beyond that of RAG-based methods, making it especially beneficial for edge computing devices."
+
+
+# From NexusRaven-V2 notebook: https://github.com/nexusflowai/NexusRaven-V2/blob/master/How-To-Prompt.ipynb
+example1 = '''def get_weather_data(coordinates):
+    """
+    Fetches weather data from the Open-Meteo API for the given latitude and longitude.
+
+    Args:
+    coordinates (tuple): The latitude of the location.
+
+    Returns:
+    float: The current temperature in the coordinates you've asked for
+    """
+
+def get_coordinates_from_city(city_name):
+    """
+    Fetches the latitude and longitude of a given city name using the Maps.co Geocoding API.
+
+    Args:
+    city_name (str): The name of the city.
+
+    Returns:
+    tuple: The latitude and longitude of the city.
+
+What's the weather like in Seattle right now?
+'''
+
+example2 = '''Function:
+def add_edge(u, v):
+    """
+    Adds an edge between node u and node v in the graph. Make sure to create a graph first by calling create_new_graph!
+
+    Args:
+    u (str): Node name as string
+    v (str): Node name as string
+    """
+
+Function:
+def is_two_nodes_connected(u, v):
+    """
+    Answers if two nodes are connected.
+    """
+
+Emma is friends with Bob and Charlie, and Charlie is friends with Erik, and Erik is friends with Brian. Can you represent all of these relationships as a graph and answer if Emma is friends with Erik?
+'''
+
+EXAMPLES = [
+    [example1],
+    [example2]
+]
+
+model_id = "NexaAIDev/Octopus-v2"
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = GemmaForCausalLM.from_pretrained(
+    model_id, torch_dtype=torch.bfloat16, device_map="auto"
+)
 
 def inference(input_text):
     start_time = time.time()
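This hunk moves model initialization ahead of inference() and adds an import gemma line alongside the existing modeling_gemma import. The nested EXAMPLES structure (one inner list per example) matches what gr.Interface expects for an interface with a single input component. The rest of inference()'s body is collapsed between this hunk and the next one, so the sketch below is only a guess at what it does with the visible tokenizer, model, and res names, following the usual tokenize/generate/decode pattern from the Octopus-v2 model card; the generate() arguments are assumptions, not the committed code.

def inference(input_text):
    start_time = time.time()
    # Tokenize the prompt and move the tensors to the model's device
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
    # Greedy decoding; max_new_tokens is an assumed value, not from the diff
    outputs = model.generate(**inputs, max_new_tokens=1024, do_sample=False)
    # Drop the prompt tokens and decode only the generated continuation
    res = tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
    end_time = time.time()
    return {"output": res, "latency": f"{end_time - start_time:.2f} seconds"}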
@@ -19,13 +77,6 @@ def inference(input_text):
     end_time = time.time()
     return {"output": res, "latency": f"{end_time - start_time:.2f} seconds"}
 
-# Initialize the tokenizer and model
-model_id = "NexaAIDev/Octopus-v2"
-tokenizer = AutoTokenizer.from_pretrained(model_id)
-model = GemmaForCausalLM.from_pretrained(
-    model_id, torch_dtype=torch.bfloat16, device_map="auto"
-)
-
 def gradio_interface(input_text):
     nexa_query = f"Below is the query from the users, please call the correct function and generate the parameters to call the function.\n\nQuery: {input_text} \n\nResponse:"
     result = inference(nexa_query)
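gradio_interface() only wraps the raw user text in the fixed instruction prefix that Octopus-v2 expects before delegating to inference(). As a worked example (the query is illustrative, not part of the commit), input_text = "What's the weather like in Seattle right now?" expands the f-string to:

Below is the query from the users, please call the correct function and generate the parameters to call the function.

Query: What's the weather like in Seattle right now? 

Response: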
@@ -35,8 +86,9 @@ iface = gr.Interface(
     fn=gradio_interface,
     inputs=gr.inputs.Textbox(lines=2, placeholder="Enter your query here..."),
     outputs=[gr.outputs.Textbox(label="Output"), gr.outputs.Textbox(label="Latency")],
-    title=
-    description=
+    title=title,
+    description=description,
+    examples=EXAMPLES
 )
 
 if __name__ == "__main__":
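In the old version, title= and description= were left without right-hand values, a syntax error that would crash app.py at import time; this commit supplies the new title and description strings and wires in examples=EXAMPLES. The badge at the top of this page still reports a runtime error, and one plausible cause is that gr.inputs.Textbox and gr.outputs.Textbox belong to the legacy Gradio namespaces that were deprecated in Gradio 3.x and removed in 4.0, so the construction would still fail on a recent Gradio. A minimal sketch of the same interface with the current top-level components, assuming Gradio 3.x or newer (the Space's pinned Gradio version is not shown in this diff, and the body under __main__ is truncated here, so the launch call is an assumption following the standard Gradio pattern):

iface = gr.Interface(
    fn=gradio_interface,
    # Top-level components replace the removed gr.inputs / gr.outputs namespaces
    inputs=gr.Textbox(lines=2, placeholder="Enter your query here..."),
    outputs=[gr.Textbox(label="Output"), gr.Textbox(label="Latency")],
    title=title,
    description=description,
    examples=EXAMPLES,
)

if __name__ == "__main__":
    iface.launch()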