Spaces:
Sleeping
Sleeping
abhaskumarsinha
committed on
Commit
•
45b8aa7
1
Parent(s):
0650031
Added app.py
Browse files — Let's see
app.py
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
|
3 |
+
# Import necessary modules
|
4 |
+
from tokenizer.tokenizer import *
|
5 |
+
from models.GPT import build_GPT
|
6 |
+
from inference.inference import *
|
7 |
+
from inference.sampling_strategies.sample_random import *
|
8 |
+
|
9 |
+
# --- Model and tokenizer setup -------------------------------------------

# Model hyper-parameters. `input_len` is the context window length; the
# tokenizer is fed input_len + 1 tokens so inputs and next-token targets
# can be sliced from the same sequence.
vocab_size = 454+1
input_len = 256

# Initialize the SentencePiece tokenizer from the bundled model file.
tokenizer = SPM_Tokenizer(vocab_model_file='./tokenizer_.model', input_size=input_len+1)

# Build GPT model.
# NOTE(review): the positional arguments after the context length and vocab
# size (1000, 2, 0, 50, 20, 5) are undocumented here — presumably embedding
# size, layer count, dropout, heads, etc.; confirm against build_GPT.
GPT, flops = build_GPT(input_len, vocab_size, 1000, 2, 0, 50, 20, 5)

# Load pre-trained weights (30M-parameter checkpoint shipped with the Space).
GPT.load_weights('AEON_30M.weights.h5')

# Default text pre-filled in the demo's input box.
default_input_text = "Nearly a half-century ago, Apollo 11 astronaut Neil Armstrong walked on the Moon. NASA is now preparing for an ambitious new era of sustainable human spaceflight and discovery. The agency is building the Space Launch System rocket and the Orion spacecraft for human deep space exploration. With the help of commercial and international partners, NASA will develop new opportunities in lunar orbit, including a platform to aid surface exploration and serve as a gateway to Mars."
|
24 |
+
|
25 |
+
def generate_text(input_text, k_value=10, generate_limit=50):
    """Autocomplete `input_text` with the loaded GPT model.

    Args:
        input_text: Prompt string to continue.
        k_value: Top-k sampling parameter forwarded to the inference engine.
        generate_limit: Maximum number of tokens to generate.

    Returns:
        The generated continuation, as produced by `inference.generate`.
    """
    # NOTE(review): `inference` must be bound at module scope by one of the
    # star-imports above (e.g. inference.inference) — verify.
    return inference.generate(input_text, generate_limit=generate_limit, k_value=k_value)
|
29 |
+
|
30 |
+
# --- Gradio UI ------------------------------------------------------------
# Create Gradio interface blocks: two markdown notices, the prompt and
# sampling-parameter inputs, an output box, and a button wired to
# generate_text.
with gr.Blocks() as demo:
    # Warning message for users
    gr.Markdown("### Warning")
    gr.Markdown("The current model is not a conversational or text domain-specific model. It was trained on a range of essays and articles on space, religion, philosophy, and current affairs. It works as a text autocompleting model that can be used to fine-tune for different purposes. Enter a text with 100 words or copy-paste it here for the best results.")

    # Model specifications
    gr.Markdown("### Model Specs")
    gr.Markdown("This is a 30M parameter model ONLY! This tiny model is free to use for any purpose under the Apache 2.0 license. Once quantized, it can work on mobile CPUs too for tiny language model purposes.")

    # Input components: text input, sliders for k_value and generate_limit,
    # laid out side by side in a single row.
    with gr.Row():
        input_text = gr.Textbox(label="Input Text", lines=10, value=default_input_text)
        k_value = gr.Slider(minimum=1, maximum=30, value=10, step=1, label="K Value")
        generate_limit = gr.Slider(minimum=1, maximum=500, value=50, step=1, label="Generate Limit")

    # Output component: text output
    output_text = gr.Textbox(label="Output Text")

    # Button to trigger text generation: clicking runs generate_text with the
    # three inputs above and writes the result into the output textbox.
    generate_button = gr.Button("Generate")
    generate_button.click(generate_text, inputs=[input_text, k_value, generate_limit], outputs=output_text)

# Launch the Gradio interface (blocking; serves the app).
demo.launch()
|