import gradio as gr
from huggingface_hub import InferenceClient
import time
from collections import defaultdict
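
# Gradio app: a user picks a "Lake" model, optionally enters an access code,
# and chats with the corresponding model via the Hugging Face Inference API.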
# Model definitions with real models
model_value = {
    "Lake 1": "meta-llama/Llama-3.2-3B"
}

# Access codes for different models
access_codes = {
    "wF99-zXDg-WRiN-qVp8": "pro",
    "8tj82-2UvU-8Lft-Dupb": "plus"
}

# Model access levels with real models
model_access_levels = {
    # "Lake 1" maps to this model and is meant to be open to everyone (see main())
    "meta-llama/Llama-3.2-3B": "everyone",
    "google/mt5-base": "everyone",
    "google/gemma-2-2b-it": "plus",
    "google/mt5-large": "plus",
    "google-bert/bert-base-multilingual-cased": "pro"
}

# Usage tracking for "Lake 1 Flash"
usage_tracker = defaultdict(list)

def check_access_code(code):
    return access_codes.get(code, None)

def recommend_model(current_model, access_level):
    if current_model == "Lake 1" and access_level == "everyone":
        return "Consider upgrading to Lake 1 Plus for more features."
    elif current_model == "Lake 1 Plus" and access_level == "plus":
        return "Consider upgrading to Lake 1 Pro for advanced features."
    return None

def can_use_flash_model():
    # The app does not identify individual users, so Flash usage is tracked under one shared key.
    user = "shared"
    current_time = time.time()
    # Keep only requests made within the last 5 hours
    usage_tracker[user] = [t for t in usage_tracker[user] if current_time - t < 5 * 3600]
    # Allow at most 20 Flash requests per 5-hour window
    if len(usage_tracker[user]) < 20:
        usage_tracker[user].append(current_time)
        return True
    return False

def respond(message, history, model):
    # Rebuild the conversation from the (user, assistant) pairs stored in history
    messages = []
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
    client = InferenceClient(model)
    response = ""
    # Stream the completion and accumulate the generated tokens
    for chunk in client.chat_completion(
        messages,
        max_tokens=512,
        stream=True,
        temperature=0.7,
        top_p=0.95,
    ):
        token = chunk.choices[0].delta.content
        if token:
            response += token
    history.append((message, response))
    return response, history

def main(message, history, model_name, access_code):
    model = model_value[model_name]
    access_level = check_access_code(access_code)
    # Rate-limit "Lake 1 Flash" (compared by display name so a missing dict entry cannot raise a KeyError)
    if model_name == "Lake 1 Flash":
        if not can_use_flash_model():
            return "Usage limit reached for Lake 1 Flash. Please try again later.", history
    # Get the required access level for the selected model
    required_access_level = model_access_levels.get(model, None)
    # Allow access to everyone-level models (such as Lake 1) without a code
    if required_access_level == "everyone":
        return respond(message, history, model)
    # Reject unknown or missing access codes before checking tiers
    if access_level is None:
        return "Invalid access code. Please enter a valid code to use this service.", history
    # Check access for the remaining models: pro unlocks everything, plus unlocks plus-level models
    if access_level == "pro" or (access_level == "plus" and required_access_level in ["plus", "everyone"]):
        return respond(message, history, model)
    # The code is valid but does not cover this model, so suggest an upgrade
    recommendation = recommend_model(model_name, access_level)
    if recommendation:
        return f"You do not have access to {model_name}. {recommendation} Please enter a valid access code for this model.", history
    return f"You do not have access to {model_name}. Please enter a valid access code for this model.", history

with gr.Blocks() as demo:
    gr.Markdown("## Welcome to the Model Interaction App")
    gr.LoginButton()  # Add Hugging Face login button
    with gr.Row():
        with gr.Column():
            access_code_input = gr.Textbox(label="Access Code", placeholder="Enter your special access code (if needed)")
    with gr.Row():
        with gr.Column():
            model_dropdown = gr.Dropdown(
                label="Choose Model",
                choices=list(model_value.keys()),
                value="Lake 1"  # Default matches a key of model_value
            )
            response_output = gr.Textbox(label="Response", interactive=False, placeholder="Response will appear here")
    with gr.Row():
        message_input = gr.Textbox(label="Message", placeholder="Type your message here")
        submit_button = gr.Button("Submit")
    history = gr.State([])
    submit_button.click(
        fn=main,
        inputs=[message_input, history, model_dropdown, access_code_input],
        outputs=[response_output, history]
    )

demo.launch()