Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,36 +1,52 @@
|
|
1 |
import gradio as gr
|
2 |
from huggingface_hub import InferenceClient
|
|
|
|
|
3 |
|
|
|
4 |
model_value = {
|
5 |
-
"Lake 1": "
|
6 |
-
"Lake 1
|
7 |
-
"Lake 1
|
|
|
8 |
}
|
9 |
|
10 |
-
|
11 |
-
|
12 |
access_codes = {
|
13 |
"wF99-zXDg-WRiN-qVp8": "pro",
|
14 |
"8tj82-2UvU-8Lft-Dupb": "plus"
|
15 |
}
|
16 |
|
|
|
17 |
model_access_levels = {
|
18 |
"WhiteRabbitNeo/Trinity-13B": "everyone",
|
19 |
"WhiteRabbitNeo/Trinity-33B-v1.0": "plus",
|
20 |
"WhiteRabbitNeo/WhiteRabbitNeo-33B-v1.5": "pro"
|
21 |
}
|
22 |
|
|
|
|
|
|
|
23 |
def check_access_code(code):
|
24 |
return access_codes.get(code, None)
|
25 |
|
26 |
def recommend_model(current_model, access_level):
|
27 |
-
# Suggest a newer model based on the current model and access level
|
28 |
if current_model == "Lake 1" and access_level == "everyone":
|
29 |
return "Consider upgrading to Lake 1 Plus for more features."
|
30 |
elif current_model == "Lake 1 Plus" and access_level == "plus":
|
31 |
return "Consider upgrading to Lake 1 Pro for advanced features."
|
32 |
return None
|
33 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
34 |
def respond(message, history, model, access_level):
|
35 |
messages = []
|
36 |
|
@@ -42,8 +58,7 @@ def respond(message, history, model, access_level):
|
|
42 |
|
43 |
messages.append({"role": "user", "content": message})
|
44 |
|
45 |
-
client
|
46 |
-
|
47 |
response = ""
|
48 |
for message in client.chat_completion(
|
49 |
messages,
|
@@ -58,14 +73,18 @@ def respond(message, history, model, access_level):
|
|
58 |
history.append((message, response))
|
59 |
return response, history
|
60 |
|
61 |
-
def main(message, history, model_name, access_code):
|
62 |
model = model_value[model_name]
|
63 |
access_level = check_access_code(access_code)
|
64 |
|
|
|
|
|
|
|
|
|
65 |
if model == model_value["Lake 1"]:
|
66 |
return respond(message, history, model, "everyone")
|
67 |
elif access_level:
|
68 |
-
required_access_level = model_access_levels
|
69 |
if access_level == required_access_level:
|
70 |
return respond(message, history, model, access_level)
|
71 |
else:
|
@@ -78,9 +97,13 @@ def main(message, history, model_name, access_code):
|
|
78 |
return "Invalid access code. Please enter a valid code to use this service.", history
|
79 |
|
80 |
with gr.Blocks() as demo:
|
|
|
|
|
|
|
81 |
with gr.Row():
|
82 |
with gr.Column():
|
83 |
access_code_input = gr.Textbox(label="Access Code", placeholder="Enter your special access code (if needed)")
|
|
|
84 |
|
85 |
with gr.Row():
|
86 |
with gr.Column():
|
@@ -89,17 +112,18 @@ with gr.Blocks() as demo:
|
|
89 |
choices=list(model_value.keys()),
|
90 |
value="Lake 1" # Changed default value to match model_value keys
|
91 |
)
|
92 |
-
response_output = gr.Textbox(label="Response", interactive=False,
|
93 |
-
|
94 |
-
|
|
|
|
|
95 |
|
96 |
-
|
97 |
|
98 |
submit_button.click(
|
99 |
-
main,
|
100 |
-
inputs=[message_input,
|
101 |
-
outputs=[response_output,
|
102 |
)
|
103 |
|
104 |
-
|
105 |
-
demo.launch()
|
|
|
1 |
import gradio as gr
|
2 |
from huggingface_hub import InferenceClient
|
3 |
+
import time
|
4 |
+
from collections import defaultdict
|
5 |
|
6 |
# Model definitions: public UI name -> Hugging Face model ID.
# The ID is what main() resolves a dropdown choice to and what respond()
# hands to InferenceClient(model).
model_value = {
    "Lake 1": "google/mt5-base",
    "Lake 1 Flash": "google/gemma-2-2b-it",
    "Lake 1 Plus": "google/mt5-large",
    "Lake 1 Pro": "google-bert/bert-base-multilingual-cased"
}
|
13 |
|
# Access codes for different models: secret code -> tier it unlocks
# ("pro" or "plus"); unknown codes resolve to None via check_access_code().
# NOTE(review): codes are hard-coded in the source and therefore visible to
# anyone with repo access — consider loading them from an environment
# variable or secret store.
access_codes = {
    "wF99-zXDg-WRiN-qVp8": "pro",
    "8tj82-2UvU-8Lft-Dupb": "plus"
}
|
19 |
|
# Model access levels: minimum tier required for each model, keyed by the
# Hugging Face model ID (i.e. the *values* of model_value).  main() resolves
# the dropdown name to a model ID and then does
# model_access_levels.get(model), so the keys here MUST match model_value's
# values.  The previous keys were WhiteRabbitNeo/* repo IDs that never appear
# in model_value, which made the lookup always return None and every gated
# model reject even valid access codes.
model_access_levels = {
    "google/mt5-base": "everyone",
    # Flash is free-tier but throttled by can_use_flash_model() in main().
    "google/gemma-2-2b-it": "everyone",
    "google/mt5-large": "plus",
    "google-bert/bert-base-multilingual-cased": "pro"
}
|
26 |
|
27 |
# Usage tracking for "Lake 1 Flash": user_id -> list of request timestamps
# (epoch seconds from time.time()) still inside the rate-limit window.
# Pruned and appended by can_use_flash_model().
usage_tracker = defaultdict(list)
|
29 |
+
|
30 |
def check_access_code(code):
    """Resolve an access code to its tier ("pro"/"plus"); None if unknown."""
    return access_codes.get(code)
|
32 |
|
33 |
def recommend_model(current_model, access_level):
    """Suggest an upgrade path for the given model/tier combination.

    Returns a human-readable suggestion string, or None when no upgrade
    recommendation applies to this (model, tier) pair.
    """
    upgrade_hints = {
        ("Lake 1", "everyone"):
            "Consider upgrading to Lake 1 Plus for more features.",
        ("Lake 1 Plus", "plus"):
            "Consider upgrading to Lake 1 Pro for advanced features.",
    }
    return upgrade_hints.get((current_model, access_level))
|
39 |
|
40 |
def can_use_flash_model(user_id):
    """Rolling rate limit for "Lake 1 Flash".

    Permits at most 20 requests per user within the last 5 hours, and
    records the current request's timestamp whenever it is allowed.

    Returns True if this request may proceed, False if the limit is hit.
    """
    now = time.time()
    # Keep only timestamps still inside the 5-hour window.
    recent = [stamp for stamp in usage_tracker[user_id] if now - stamp < 5 * 3600]
    usage_tracker[user_id] = recent
    if len(recent) >= 20:
        return False
    # Same list object as usage_tracker[user_id], so this records the call.
    recent.append(now)
    return True
|
49 |
+
|
50 |
def respond(message, history, model, access_level):
|
51 |
messages = []
|
52 |
|
|
|
58 |
|
59 |
messages.append({"role": "user", "content": message})
|
60 |
|
61 |
+
client = InferenceClient(model)
|
|
|
62 |
response = ""
|
63 |
for message in client.chat_completion(
|
64 |
messages,
|
|
|
73 |
history.append((message, response))
|
74 |
return response, history
|
75 |
|
76 |
+
def main(message, history, model_name, access_code, user_id):
|
77 |
model = model_value[model_name]
|
78 |
access_level = check_access_code(access_code)
|
79 |
|
80 |
+
if model == model_value["Lake 1 Flash"]:
|
81 |
+
if not can_use_flash_model(user_id):
|
82 |
+
return "Usage limit reached for Lake 1 Flash. Please try again later.", history
|
83 |
+
|
84 |
if model == model_value["Lake 1"]:
|
85 |
return respond(message, history, model, "everyone")
|
86 |
elif access_level:
|
87 |
+
required_access_level = model_access_levels.get(model, None)
|
88 |
if access_level == required_access_level:
|
89 |
return respond(message, history, model, access_level)
|
90 |
else:
|
|
|
97 |
return "Invalid access code. Please enter a valid code to use this service.", history
|
98 |
|
99 |
with gr.Blocks() as demo:
|
100 |
+
gr.Markdown("## Welcome to the Model Interaction App")
|
101 |
+
gr.LoginButton() # Add Hugging Face login button
|
102 |
+
|
103 |
with gr.Row():
|
104 |
with gr.Column():
|
105 |
access_code_input = gr.Textbox(label="Access Code", placeholder="Enter your special access code (if needed)")
|
106 |
+
user_id_input = gr.Textbox(label="User ID", placeholder="Enter your user ID")
|
107 |
|
108 |
with gr.Row():
|
109 |
with gr.Column():
|
|
|
112 |
choices=list(model_value.keys()),
|
113 |
value="Lake 1" # Changed default value to match model_value keys
|
114 |
)
|
115 |
+
response_output = gr.Textbox(label="Response", interactive=False, placeholder="Response will appear here")
|
116 |
+
|
117 |
+
with gr.Row():
|
118 |
+
message_input = gr.Textbox(label="Message", placeholder="Type your message here")
|
119 |
+
submit_button = gr.Button("Submit")
|
120 |
|
121 |
+
history = gr.State([])
|
122 |
|
123 |
submit_button.click(
|
124 |
+
fn=main,
|
125 |
+
inputs=[message_input, history, model_dropdown, access_code_input, user_id_input],
|
126 |
+
outputs=[response_output, history]
|
127 |
)
|
128 |
|
129 |
+
demo.launch()
|
|