import gradio as gr
from huggingface_hub import InferenceClient
import time
from collections import defaultdict

# Model definitions
model_value = {
    "Lake 1": "google/mt5-base",
    "Lake 1 Flash": "google/gemma-2-2b-it",
    "Lake 1 Plus": "google/mt5-large",
    "Lake 1 Pro": "google-bert/bert-base-multilingual-cased"
}

# Access codes for different models
access_codes = {
    "wF99-zXDg-WRiN-qVp8": "pro",
    "8tj82-2UvU-8Lft-Dupb": "plus"
}

# Model access levels (which access tiers may use each model)
model_access_levels = {
    "google/mt5-base": ["everyone"],
    "google/gemma-2-2b-it": ["plus", "pro"],
    "google/mt5-large": ["plus", "pro"],
    "google-bert/bert-base-multilingual-cased": ["pro"]
}

# Usage tracking for "Lake 1 Flash"
usage_tracker = defaultdict(list)

def check_access_code(code):
    return access_codes.get(code, None)

def recommend_model(current_model, access_level):
    if current_model == "Lake 1" and access_level == "everyone":
        return "Consider upgrading to Lake 1 Plus for more features."
    elif current_model == "Lake 1 Plus" and access_level == "plus":
        return "Consider upgrading to Lake 1 Pro for advanced features."
    return None

def can_use_flash_model(user_id):
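    # Rolling rate limit: at most 20 Flash requests per user in any 5-hour window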
    current_time = time.time()
    usage_tracker[user_id] = [t for t in usage_tracker[user_id] if current_time - t < 5 * 3600]
    
    if len(usage_tracker[user_id]) < 20:
        usage_tracker[user_id].append(current_time)
        return True
    else:
        return False

def respond(message, history, model, access_level):
    messages = []

    # Rebuild the conversation from history as alternating user/assistant turns
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    messages.append({"role": "user", "content": message})

    client = InferenceClient(model)
    response = ""
    # Stream the completion and accumulate generated tokens into the response
    for chunk in client.chat_completion(
        messages,
        max_tokens=512,
        stream=True,
        temperature=0.7,
        top_p=0.95,
    ):
        token = chunk.choices[0].delta.content
        if token:
            response += token

    history.append((message, response))
    return response, history

def main(message, history, model_name, access_code, user_id):
    model = model_value[model_name]
    access_level = check_access_code(access_code)
    
    if model == model_value["Lake 1 Flash"]:
        if not can_use_flash_model(user_id):
            return "Usage limit reached for Lake 1 Flash. Please try again later.", history
    
    if model == model_value["Lake 1"]:
        return respond(message, history, model, "everyone")
    elif access_level:
        allowed_levels = model_access_levels.get(model, [])
        if access_level in allowed_levels:
            return respond(message, history, model, access_level)
        else:
            recommendation = recommend_model(model_name, access_level)
            if recommendation:
                return f"You do not have access to {model_name}. {recommendation} Please enter a valid access code for this model.", history
            else:
                return f"You do not have access to {model_name}. Please enter a valid access code for this model.", history
    else:
        return "Invalid access code. Please enter a valid code to use this service.", history

with gr.Blocks() as demo:
    gr.Markdown("## Welcome to the Model Interaction App")
    gr.LoginButton()  # Hugging Face login button

    with gr.Row():
        with gr.Column():
            access_code_input = gr.Textbox(label="Access Code", placeholder="Enter your special access code (if needed)")
            user_id_input = gr.Textbox(label="User  ID", placeholder="Enter your user ID")

    with gr.Row():
        with gr.Column():
            model_dropdown = gr.Dropdown(
                label="Choose Model",
                choices=list(model_value.keys()),
                value="Lake 1"  # Changed default value to match model_value keys
            )
            response_output = gr.Textbox(label="Response", interactive=False, placeholder="Response will appear here")

    with gr.Row():
        message_input = gr.Textbox(label="Message", placeholder="Type your message here")
        submit_button = gr.Button("Submit")

    history = gr.State([])

    submit_button.click(
        fn=main,
        inputs=[message_input, history, model_dropdown, access_code_input, user_id_input],
        outputs=[response_output, history]
    )

demo.launch()
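
# Note: depending on the selected models' access settings on the Hub, InferenceClient
# may need an authenticated Hugging Face token (for example via `huggingface_hub.login()`
# or the HF_TOKEN environment variable).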