# Lake-1 / app.py — Gradio Space by BICORP (commit 856f1c7)
import gradio as gr
from huggingface_hub import InferenceClient
import time
from collections import defaultdict
# Model definitions with real models
# Maps the UI display name -> Hugging Face model id handed to InferenceClient.
# NOTE(review): main() also looks up "Lake 1 Flash" in this dict, but no such
# key exists here — confirm whether a Flash entry should be added.
model_value = {
    "Lake 1": "meta-llama/Llama-3.2-3B"
}
# Access codes for different models
# Maps a literal access code -> tier name ("pro" or "plus").
# SECURITY NOTE(review): codes are hard-coded in source; anyone who can read
# this file can use them. Consider loading them from a secret/env variable.
access_codes = {
    "wF99-zXDg-WRiN-qVp8": "pro",
    "8tj82-2UvU-8Lft-Dupb": "plus"
}
# Model access levels with real models
# Minimum access tier required per backing model id.
# Tiers: "everyone" (no code needed) < "plus" < "pro" (a "pro" code unlocks all).
model_access_levels = {
    # Fix: the only model selectable in the UI ("Lake 1" ->
    # meta-llama/Llama-3.2-3B) had no entry here, so main() resolved its
    # required tier to None and rejected every request. Per the code's own
    # comment, Lake 1 is meant to be open to everyone.
    "meta-llama/Llama-3.2-3B": "everyone",
    "google/mt5-base": "everyone",
    "google/gemma-2-2b-it": "plus",
    "google/mt5-large": "plus",
    "google-bert/bert-base-multilingual-cased": "pro"
}
# Usage tracking for "Lake 1 Flash"
# Maps a user key -> list of request timestamps (seconds since the epoch),
# consumed by can_use_flash_model() for rolling-window rate limiting.
usage_tracker = defaultdict(list)
def check_access_code(code):
    """Return the access tier ("pro" / "plus") for *code*, or None if unknown."""
    return access_codes.get(code)
def recommend_model(current_model, access_level):
    """Suggest the next tier upgrade for a (model, access level) pair.

    Returns the upgrade message, or None when no recommendation applies.
    """
    # Table-driven lookup instead of an if/elif chain.
    upgrade_hints = {
        ("Lake 1", "everyone"): "Consider upgrading to Lake 1 Plus for more features.",
        ("Lake 1 Plus", "plus"): "Consider upgrading to Lake 1 Pro for advanced features.",
    }
    return upgrade_hints.get((current_model, access_level))
def can_use_flash_model(user="global", tracker=None):
    """Rolling-window rate limit: allow at most 20 requests per 5 hours.

    Parameters
    ----------
    user : hashable
        Key identifying the caller. Defaults to one shared bucket, which
        preserves the original zero-argument call sites.
    tracker : dict | None
        Mapping of user -> list of request timestamps. Defaults to the
        module-level ``usage_tracker`` (a parameter makes the limiter
        testable and reusable).

    Returns
    -------
    bool
        True (and records this request) when the user is under the limit,
        False otherwise.

    Bug fixed: the original only iterated over *existing* tracker keys, and
    nothing anywhere inserted a key, so the loop body never ran and the
    function always returned False — the Flash tier was permanently blocked.
    It also granted/charged quota against an arbitrary stored user rather
    than the caller.
    """
    if tracker is None:
        tracker = usage_tracker
    now = time.time()
    # Keep only this user's timestamps inside the 5-hour window.
    recent = [t for t in tracker.get(user, []) if now - t < 5 * 3600]
    allowed = len(recent) < 20
    if allowed:
        recent.append(now)
    tracker[user] = recent
    return allowed
def respond(message, history, model):
    """Stream a chat completion for *message* given the prior *history*.

    Parameters
    ----------
    message : str
        The new user message.
    history : list[tuple]
        Pairs of (user_text, assistant_text) from earlier turns; the new
        turn is appended in place.
    model : str
        Hugging Face model id passed to InferenceClient.

    Returns
    -------
    tuple[str, list]
        The full assistant response and the updated history.
    """
    # Rebuild the conversation in chat-completion message format.
    messages = []
    for user_text, assistant_text in history:
        if user_text:
            messages.append({"role": "user", "content": user_text})
        if assistant_text:
            messages.append({"role": "assistant", "content": assistant_text})
    messages.append({"role": "user", "content": message})

    client = InferenceClient(model)
    response = ""
    # Bug fixed: the original loop reused the name `message` for the stream
    # chunks, shadowing the user's message; it then appended a raw chunk
    # object to history instead of the user's text.
    for chunk in client.chat_completion(
        messages,
        max_tokens=512,
        stream=True,
        temperature=0.7,
        top_p=0.95,
    ):
        token = chunk.choices[0].delta.content
        if token:  # delta.content can be None on some (e.g. final) chunks
            response += token
    history.append((message, response))
    return response, history
def main(message, history, model_name, access_code):
    """Route a chat message to the selected model, enforcing access control.

    Parameters
    ----------
    message : str — user message from the UI.
    history : list — chat history state (pairs of user/assistant text).
    model_name : str — display name chosen in the dropdown (key of model_value).
    access_code : str — optional code mapped to a tier by check_access_code().

    Returns
    -------
    tuple[str, list] — (response text, updated history).
    """
    model = model_value[model_name]
    access_level = check_access_code(access_code)

    # Rate-limit the free "Lake 1 Flash" tier. Bug fixed: the original did
    # model_value["Lake 1 Flash"], but that key does not exist, so every
    # single call raised KeyError; compare the display name instead.
    if model_name == "Lake 1 Flash":
        if not can_use_flash_model():
            return "Usage limit reached for Lake 1 Flash. Please try again later.", history

    # Minimum tier required for the selected model (None if unlisted).
    required_access_level = model_access_levels.get(model)

    # Models open to everyone need no access code.
    if required_access_level == "everyone":
        return respond(message, history, model)

    # "pro" codes unlock everything; "plus" codes unlock plus/everyone tiers.
    if access_level == "pro" or (access_level == "plus" and required_access_level in ["plus", "everyone"]):
        return respond(message, history, model)

    # Insufficient access: explain, and suggest an upgrade when one applies.
    # (The original had a final "Invalid access code" return after this
    # if/else — it was unreachable because both branches return.)
    recommendation = recommend_model(model_name, access_level)
    if recommendation:
        return f"You do not have access to the {model_name}. {recommendation} Please enter a valid access code for this model.", history
    return f"You do not have access to the {model_name}. Please enter a valid access code for this model.", history
# Build the Gradio UI. The variable MUST stay named `demo` — Hugging Face
# Spaces discovers the app by that module-level name.
with gr.Blocks() as demo:
    gr.Markdown("## Welcome to the Model Interaction App")
    gr.LoginButton()  # Hugging Face login button

    with gr.Row():
        with gr.Column():
            code_box = gr.Textbox(
                label="Access Code",
                placeholder="Enter your special access code (if needed)",
            )

    with gr.Row():
        with gr.Column():
            model_picker = gr.Dropdown(
                label="Choose Model",
                choices=list(model_value.keys()),
                value="Lake 1",  # default must be a key of model_value
            )
            response_box = gr.Textbox(
                label="Response",
                interactive=False,
                placeholder="Response will appear here",
            )

    with gr.Row():
        message_box = gr.Textbox(label="Message", placeholder="Type your message here")
        send_btn = gr.Button("Submit")

    # Per-session chat history, threaded through main() on every click.
    chat_state = gr.State([])

    send_btn.click(
        fn=main,
        inputs=[message_box, chat_state, model_picker, code_box],
        outputs=[response_box, chat_state],
    )

demo.launch()