# Lake-1 / app.py — BICORP
# Update app.py (commit 53234a6, verified) — 4.49 kB
import gradio as gr
from huggingface_hub import InferenceClient
import time
from collections import defaultdict
# Model definitions: user-facing model names -> Hugging Face Hub repo ids.
# The keys double as the choices of the UI dropdown below.
model_value = {
    "Lake 1": "google/mt5-base",
    "Lake 1 Flash": "google/gemma-2-2b-it",
    "Lake 1 Plus": "google/mt5-large",
    "Lake 1 Pro": "google-bert/bert-base-multilingual-cased"
}
# Access codes for different models: code string -> granted access tier.
# NOTE(review): codes are hard-coded in source; consider moving them to a
# secret/environment variable so they are not visible in the repo.
access_codes = {
    "wF99-zXDg-WRiN-qVp8": "pro",
    "8tj82-2UvU-8Lft-Dupb": "plus"
}
# Model access levels: repo id -> tier(s) allowed to use the model.
# A value is either a single tier string or a tuple of acceptable tiers.
# (The original literal `"plus", "pro"` as a dict value was a SyntaxError;
# multi-tier entries are now expressed as tuples.)
model_access_levels = {
    "google/mt5-base": "everyone",
    "google/gemma-2-2b-it": ("plus", "pro"),
    "google/mt5-large": ("plus", "pro"),
    "google-bert/bert-base-multilingual-cased": "pro"
}
# Usage tracking for "Lake 1 Flash": user_id -> list of request timestamps
# (epoch seconds), consumed by can_use_flash_model() for rate limiting.
usage_tracker = defaultdict(list)
def check_access_code(code):
    """Return the access tier granted by *code*, or None if unrecognized."""
    try:
        return access_codes[code]
    except KeyError:
        return None
def recommend_model(current_model, access_level):
    """Suggest an upgrade for the given model/tier combination.

    Returns a recommendation string for the (model, tier) pairs that have
    an upgrade path, and None for every other combination.
    """
    upgrade_hints = {
        ("Lake 1", "everyone"): "Consider upgrading to Lake 1 Plus for more features.",
        ("Lake 1 Plus", "plus"): "Consider upgrading to Lake 1 Pro for advanced features.",
    }
    return upgrade_hints.get((current_model, access_level))
def can_use_flash_model(user_id):
    """Per-user rate limiter for "Lake 1 Flash".

    Allows at most 20 calls per rolling 5-hour window.  When the call is
    allowed, its timestamp is recorded; expired timestamps are pruned on
    every check.  Returns True if the user may proceed, False otherwise.
    """
    now = time.time()
    window_seconds = 5 * 3600
    recent = [stamp for stamp in usage_tracker[user_id] if now - stamp < window_seconds]
    usage_tracker[user_id] = recent
    if len(recent) >= 20:
        return False
    recent.append(now)
    return True
def respond(message, history, model, access_level):
    """Send *message* plus the chat *history* to *model* and return the reply.

    Parameters
    ----------
    message : str
        The new user message.
    history : list
        Prior (user, assistant) turns; the new turn is appended in place.
    model : str
        Hugging Face repo id to query via the Inference API.
    access_level : str
        Caller's access tier (not used here; kept for interface
        compatibility with main()).

    Returns
    -------
    tuple
        (full assistant reply, updated history).
    """
    messages = []
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    client = InferenceClient(model)
    response = ""
    # BUG FIX: the original loop variable was named `message`, shadowing the
    # parameter, so the history entry recorded the last stream chunk instead
    # of the user's message.  Renamed to `chunk`.
    for chunk in client.chat_completion(
        messages,
        max_tokens=512,
        stream=True,
        temperature=0.7,
        top_p=0.95,
    ):
        token = chunk.choices[0].delta.content
        # delta.content can be None on some stream events; skip those so we
        # don't raise TypeError on `str + None`.
        if token:
            response += token
    history.append((message, response))
    return response, history
def main(message, history, model_name, access_code, user_id):
    """Route a chat request to the chosen model, enforcing access control.

    Returns (response_text, history); when the request is rejected, the
    text explains why and the history is returned unchanged.
    """
    model = model_value[model_name]
    access_level = check_access_code(access_code)

    # "Lake 1 Flash" is rate limited per user regardless of access tier.
    if model == model_value["Lake 1 Flash"]:
        if not can_use_flash_model(user_id):
            return "Usage limit reached for Lake 1 Flash. Please try again later.", history

    # The base model is open to everyone; no access code required.
    if model == model_value["Lake 1"]:
        return respond(message, history, model, "everyone")

    if not access_level:
        return "Invalid access code. Please enter a valid code to use this service.", history

    required = model_access_levels.get(model)
    # BUG FIX: the original `access_level == required` could never match a
    # model that accepts multiple tiers.  Accept either a single tier string
    # or a collection of tiers.
    if access_level == required or (
        isinstance(required, (list, tuple, set)) and access_level in required
    ):
        return respond(message, history, model, access_level)

    recommendation = recommend_model(model_name, access_level)
    if recommendation:
        return f"You do not have access to the {model_name}. {recommendation} Please enter a valid access code for this model.", history
    return f"You do not have access to the {model_name}. Please enter a valid access code for this model.", history
# --- Gradio UI wiring ------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("## Welcome to the Model Interaction App")
    gr.LoginButton()  # Add Hugging Face login button
    with gr.Row():
        with gr.Column():
            access_code_input = gr.Textbox(label="Access Code", placeholder="Enter your special access code (if needed)")
            user_id_input = gr.Textbox(label="User ID", placeholder="Enter your user ID")
    with gr.Row():
        with gr.Column():
            model_dropdown = gr.Dropdown(
                label="Choose Model",
                choices=list(model_value.keys()),
                value="Lake 1"  # Changed default value to match model_value keys
            )
            response_output = gr.Textbox(label="Response", interactive=False, placeholder="Response will appear here")
    with gr.Row():
        message_input = gr.Textbox(label="Message", placeholder="Type your message here")
        submit_button = gr.Button("Submit")
    # Per-session conversation state, threaded through main() on each submit.
    history = gr.State([])
    # main() returns (response_text, updated_history), matching `outputs`.
    submit_button.click(
        fn=main,
        inputs=[message_input, history, model_dropdown, access_code_input, user_id_input],
        outputs=[response_output, history]
    )
demo.launch()