Upload 2 files
- app.py +97 -0
- requirements.txt +61 -0
app.py
ADDED
@@ -0,0 +1,97 @@
import gradio as gr
from huggingface_hub import InferenceClient

# Display names mapped to Hugging Face model ids
model_value = {
    "Trinity 13B (Everyone)": "WhiteRabbitNeo/Trinity-13B",
    "Trinity 33B (Plus)": "WhiteRabbitNeo/Trinity-33B-v1.0",
    "White Rabbit Neo 33B v1.5 (Pro)": "WhiteRabbitNeo/WhiteRabbitNeo-33B-v1.5"
}

# A single client is reused; the target model is passed per request
client = InferenceClient()

# Define a dictionary for access codes
access_codes = {
    "wF99-zXDg-WRiN-qVp8": "pro",    # Example code for pro access
    "8tj82-2UvU-8Lft-Dupb": "plus"   # Example code for plus access
}

# Define model access levels
model_access_levels = {
    "WhiteRabbitNeo/Trinity-13B": "everyone",  # No access code needed
    "WhiteRabbitNeo/Trinity-33B-v1.0": "plus",
    "WhiteRabbitNeo/WhiteRabbitNeo-33B-v1.5": "pro"
}


def check_access_code(code):
    return access_codes.get(code, None)


def respond(message, history, model, access_level):
    # Rebuild the chat history in the OpenAI-style message format
    messages = []
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    messages.append({"role": "user", "content": message})

    # Stream the completion and accumulate the tokens into one response string
    response = ""
    for chunk in client.chat_completion(
        messages,
        model=model,
        max_tokens=512,
        stream=True,
        temperature=0.7,
        top_p=0.95,
    ):
        token = chunk.choices[0].delta.content
        if token:
            response += token

    history.append((message, response))
    return response, history


def main(message, history, model_name, access_code):
    history = history or []
    model = model_value[model_name]
    access_level = check_access_code(access_code)

    if model == model_value["Trinity 13B (Everyone)"]:
        # Everyone can access this model without an access code
        return respond(message, history, model, "everyone")
    elif access_level:
        # Check if the access level is sufficient for the selected model
        required_access_level = model_access_levels[model]
        if access_level == required_access_level:
            return respond(message, history, model, access_level)
        else:
            return f"You do not have access to the {model_name}. Please enter a valid access code for this model.", history
    else:
        return "Invalid access code. Please enter a valid code to use this service.", history


with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            access_code_input = gr.Textbox(label="Access Code", placeholder="Enter your special access code (if needed)")

    with gr.Row():
        with gr.Column():
            model_dropdown = gr.Dropdown(
                label="Choose Model",
                choices=list(model_value.keys()),
                value="Trinity 13B (Everyone)"
            )
            response_output = gr.Textbox(label="Response", interactive=False, lines=10)
            message_input = gr.Textbox(label="Message", placeholder="Type your message here...")
            submit_button = gr.Button("Send")

    # Per-session chat history, stored as (user, assistant) tuples
    history_state = gr.State([])

    submit_button.click(
        main,
        inputs=[message_input, history_state, model_dropdown, access_code_input],
        outputs=[response_output, history_state]
    )

if __name__ == "__main__":
    demo.launch()
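
A minimal sketch (not part of this upload) of how the access gating in main() resolves for a few inputs. It exercises only the lookup tables defined in app.py, so no Hugging Face Inference API call is made; importing app builds the Blocks UI but does not launch it, since demo.launch() is guarded by the __main__ check. The allowed() helper is hypothetical, introduced here purely for illustration.

# Sketch: check the access-gating lookups from app.py without hitting the API.
from app import check_access_code, model_access_levels, model_value

def allowed(model_name, code):
    # Hypothetical helper mirroring the checks main() performs before respond()
    model = model_value[model_name]
    required = model_access_levels[model]
    return required == "everyone" or check_access_code(code) == required

print(allowed("Trinity 13B (Everyone)", ""))                     # True: no code needed
print(allowed("Trinity 33B (Plus)", "8tj82-2UvU-8Lft-Dupb"))     # True: plus code matches
print(allowed("White Rabbit Neo 33B v1.5 (Pro)", "wrong-code"))  # False: invalid code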
requirements.txt
ADDED
@@ -0,0 +1,61 @@
aiofiles==23.2.1
annotated-types==0.7.0
anyio==4.6.2.post1
blinker==1.9.0
certifi==2024.8.30
charset-normalizer==3.4.0
click==8.1.7
fastapi==0.115.5
ffmpy==0.4.0
filelock==3.16.1
Flask==3.1.0
Flask-Login==0.6.0
fsspec==2024.10.0
gradio==5.7.1
gradio_client==1.5.0
h11==0.14.0
httpcore==1.0.7
httpx==0.28.0
huggingface-hub==0.26.2
idna==3.10
itsdangerous==2.2.0
Jinja2==3.1.4
markdown-it-py==3.0.0
MarkupSafe==2.1.5
mdurl==0.1.2
numpy==2.1.3
orjson==3.10.12
packaging==24.2
pandas==2.2.3
pillow==11.0.0
pydantic==2.10.2
pydantic_core==2.27.1
pydub==0.25.1
Pygments==2.18.0
python-dateutil==2.9.0.post0
python-multipart==0.0.12
pytz==2024.2
PyYAML==6.0.2
regex==2024.11.6
regobj==0.2.2
requests==2.32.3
rich==13.9.4
ruff==0.8.1
safehttpx==0.1.1
safetensors==0.4.5
semantic-version==2.10.0
shellingham==1.5.4
six==1.16.0
sniffio==1.3.1
starlette==0.41.3
tokenizers==0.20.3
tomlkit==0.12.0
tqdm==4.67.0
transformers==4.46.3
typer==0.14.0
typing_extensions==4.12.2
tzdata==2024.2
urllib3==2.2.3
uvicorn==0.32.1
websockets==12.0
Werkzeug==3.1.3
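
As a small aside (not part of the commit), the pins above can be sanity-checked against the active environment. This sketch only looks at the two distributions app.py imports directly; the pinned version numbers are taken from the list above.

# Sketch: confirm the packages app.py imports are installed at the pinned versions.
from importlib.metadata import PackageNotFoundError, version

pinned = {"gradio": "5.7.1", "huggingface-hub": "0.26.2"}

for name, expected in pinned.items():
    try:
        installed = version(name)
        status = "OK" if installed == expected else f"mismatch (installed {installed})"
    except PackageNotFoundError:
        status = "missing"
    print(f"{name}=={expected}: {status}")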